Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to e100, igb, igbvf, ixgbe and ixgbevf.

Stefan adds an igb patch to enable stripping of the VLAN header from packets bound for a VM on i350 hardware.

Joe Perches provides patches for e100, igb, igbvf, ixgbe and ixgbevf to convert uses of __constant_<foo> to plain <foo>, to align with the rest of the kernel.

Don provides two fixes for ixgbe. The first resolves a link issue with DA cables where we were not always freeing the firmware/software semaphore after grabbing it. The second stops caching whether management firmware is enabled; since this is not static, we need to verify it on each check.

Jacob provides six fixes/cleanups for ixgbe. Most notably, he corrects stop_mac_link_on_d3() to check the Core Clock Disable bit before stopping link, and to fully check whether management firmware is running or could be enabled before bringing down the link. He also fixes flow control auto-negotiation for KR/KX/KX4 interfaces: when setting up the MAC link, the cached AUTOC value and the current AUTOC value were being used incorrectly to determine whether a link reset is required.

Emil provides a fix for ixgbe where aggressive ndo_start_xmit() callers had a chance to sneak packets in between enabling the Tx queues and the link coming up. To resolve this, the call to enable the Tx queues is moved to after the link is established.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c60c8337ef
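A note on the __constant_<foo> conversions that make up much of the diffs below: in the kernel, htons(), cpu_to_le16() and the other byte-order helpers already pick a compile-time constant expansion when their argument is constant (via __builtin_constant_p in the swab/byteorder headers), so spelling out the __constant_ variant buys nothing. A minimal userspace sketch of the resulting style follows; l3_name() and the ETH_P_* defines are stand-ins invented for this example, not code from the drivers.

/*
 * Minimal userspace sketch (not driver code) of the htons()-with-constant
 * style that the series converts to.  ETH_P_IP/ETH_P_IPV6 carry the standard
 * EtherType values; l3_name() is a helper invented for this example.
 */
#include <arpa/inet.h>  /* htons() */
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800 /* IPv4 EtherType, as in <linux/if_ether.h> */
#define ETH_P_IPV6 0x86DD /* IPv6 EtherType */

/* EtherTypes sit on the wire in network byte order, so the field is compared
 * against htons(constant); the conversion of a constant folds at build time. */
static const char *l3_name(uint16_t wire_proto)
{
    if (wire_proto == htons(ETH_P_IP))
        return "IPv4";
    if (wire_proto == htons(ETH_P_IPV6))
        return "IPv6";
    return "other";
}

int main(void)
{
    uint16_t proto = htons(ETH_P_IP); /* simulate a captured EtherType */

    printf("%s\n", l3_name(proto));
    return 0;
}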
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
     * testing, ie sending frames with bad CRC.
     */
    if (unlikely(skb->no_fcs))
-       cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+       cb->command |= cpu_to_le16(cb_tx_nc);
    else
-       cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+       cb->command &= ~cpu_to_le16(cb_tx_nc);

    /* interrupt every 16 packets regardless of delay */
    if ((nic->cbs_avail & ~15) == nic->cbs_avail)
@@ -230,6 +230,10 @@ struct e1000_adv_tx_context_desc {
 #define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
 #define E1000_VMOLR_STRCRC  0x80000000 /* CRC stripping enable */

+#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN  0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC   0x80000000 /* CRC stripping enable */
+
 #define E1000_VLVF_ARRAY_SIZE    32
 #define E1000_VLVF_VLANID_MASK   0x00000FFF
 #define E1000_VLVF_POOLSEL_SHIFT 12
@@ -357,6 +357,7 @@
 #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
 #define E1000_VMBMEM(_n)     (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n)      (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n)     (0x0C038 + (64 * (_n)))
 #define E1000_VLVF(_n)       (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
                                                      * Filter - RW */
 #define E1000_VMVIR(_n)      (0x03700 + (4 * (_n)))
@@ -3542,6 +3542,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,

     vmolr = rd32(E1000_VMOLR(vfn));
     vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+    if (hw->mac.type == e1000_i350) {
+        u32 dvmolr;
+
+        dvmolr = rd32(E1000_DVMOLR(vfn));
+        dvmolr |= E1000_DVMOLR_STRVLAN;
+        wr32(E1000_DVMOLR(vfn), dvmolr);
+    }
     if (aupe)
         vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
     else
@@ -4585,7 +4592,7 @@ static int igb_tso(struct igb_ring *tx_ring,
     /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
     type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

-    if (first->protocol == __constant_htons(ETH_P_IP)) {
+    if (first->protocol == htons(ETH_P_IP)) {
         struct iphdr *iph = ip_hdr(skb);
         iph->tot_len = 0;
         iph->check = 0;
@@ -4641,12 +4648,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
     } else {
         u8 l4_hdr = 0;
         switch (first->protocol) {
-        case __constant_htons(ETH_P_IP):
+        case htons(ETH_P_IP):
             vlan_macip_lens |= skb_network_header_len(skb);
             type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
             l4_hdr = ip_hdr(skb)->protocol;
             break;
-        case __constant_htons(ETH_P_IPV6):
+        case htons(ETH_P_IPV6):
             vlan_macip_lens |= skb_network_header_len(skb);
             l4_hdr = ipv6_hdr(skb)->nexthdr;
             break;
@@ -6731,7 +6738,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
     hdr.network += ETH_HLEN;

     /* handle any vlan tag if present */
-    if (protocol == __constant_htons(ETH_P_8021Q)) {
+    if (protocol == htons(ETH_P_8021Q)) {
         if ((hdr.network - data) > (max_len - VLAN_HLEN))
             return max_len;

@@ -6740,7 +6747,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
     }

     /* handle L3 protocols */
-    if (protocol == __constant_htons(ETH_P_IP)) {
+    if (protocol == htons(ETH_P_IP)) {
         if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
             return max_len;

@@ -6754,7 +6761,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
         /* record next protocol if header is present */
         if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
             nexthdr = hdr.ipv4->protocol;
-    } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+    } else if (protocol == htons(ETH_P_IPV6)) {
         if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
             return max_len;

@@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,

     if (skb->ip_summed == CHECKSUM_PARTIAL) {
         switch (skb->protocol) {
-        case __constant_htons(ETH_P_IP):
+        case htons(ETH_P_IP):
             tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
             if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
             break;
-        case __constant_htons(ETH_P_IPV6):
+        case htons(ETH_P_IPV6):
             if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
             break;
@@ -1315,7 +1315,6 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
     .release_swfw_sync = &ixgbe_release_swfw_sync,
     .get_thermal_sensor_data = NULL,
     .init_thermal_sensor_thresh = NULL,
-    .mng_fw_enabled = NULL,
     .prot_autoc_read = &prot_autoc_read_generic,
     .prot_autoc_write = &prot_autoc_write_generic,
 };
@@ -67,7 +67,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);

-static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
 {
     u32 fwsm, manc, factps;

@@ -94,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
     * and MNG not enabled
     */
    if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-       !hw->mng_fw_enabled) {
+       !ixgbe_mng_enabled(hw)) {
        mac->ops.disable_tx_laser =
            &ixgbe_disable_tx_laser_multispeed_fiber;
        mac->ops.enable_tx_laser =
@@ -210,7 +210,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
    if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
        ret_val = hw->mac.ops.acquire_swfw_sync(hw,
                        IXGBE_GSSR_MAC_CSR_SM);
-       if (!ret_val)
+       if (ret_val)
            return IXGBE_ERR_SWFW_SYNC;

        *locked = true;
@@ -245,8 +245,10 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
    if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
        ret_val = hw->mac.ops.acquire_swfw_sync(hw,
                        IXGBE_GSSR_MAC_CSR_SM);
-       if (!ret_val)
+       if (ret_val)
            return IXGBE_ERR_SWFW_SYNC;
+
        locked = true;
    }
+
    IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -515,9 +517,17 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
  **/
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
 {
-    u32 autoc2_reg;
+    u32 autoc2_reg, fwsm;
+    u16 ee_ctrl_2 = 0;

-    if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+    hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
+
+    /* Check to see if MNG FW could be enabled */
+    fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+
+    if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
+        !hw->wol_enabled &&
+        ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
        autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
        autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -653,75 +663,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
    }
 }

-/**
- * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- * @hw: pointer to hardware structure
- * @speed: link speed to set
- *
- * We set the module speed differently for fixed fiber. For other
- * multi-speed devices we don't have an error value so here if we
- * detect an error we just log it and exit.
- */
-static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
-                                        ixgbe_link_speed speed)
-{
-    s32 status;
-    u8 rs, eeprom_data;
-
-    switch (speed) {
-    case IXGBE_LINK_SPEED_10GB_FULL:
-        /* one bit mask same as setting on */
-        rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
-        break;
-    case IXGBE_LINK_SPEED_1GB_FULL:
-        rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
-        break;
-    default:
-        hw_dbg(hw, "Invalid fixed module speed\n");
-        return;
-    }
-
-    /* Set RS0 */
-    status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
-                                       IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                       &eeprom_data);
-    if (status) {
-        hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
-        goto out;
-    }
-
-    eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
-
-    status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
-                                        IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                        eeprom_data);
-    if (status) {
-        hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
-        goto out;
-    }
-
-    /* Set RS1 */
-    status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
-                                       IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                       &eeprom_data);
-    if (status) {
-        hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
-        goto out;
-    }
-
-    eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
-    status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
-                                        IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                        eeprom_data);
-    if (status) {
-        hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
-        goto out;
-    }
-out:
-    return;
-}
-
 /**
  * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  * @hw: pointer to hardware structure
@@ -832,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,

        /* Set the module link speed */
        switch (hw->phy.media_type) {
-       case ixgbe_media_type_fiber_fixed:
-           ixgbe_set_fiber_fixed_speed(hw,
-                                       IXGBE_LINK_SPEED_1GB_FULL);
-           break;
        case ixgbe_media_type_fiber:
            esdp_reg &= ~IXGBE_ESDP_SDP5;
            esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -1021,15 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                       ixgbe_link_speed speed,
                                       bool autoneg_wait_to_complete)
 {
-    s32 status = 0;
-    u32 autoc, pma_pmd_1g, link_mode, start_autoc;
-    u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-    u32 orig_autoc = 0;
-    u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-    u32 links_reg;
-    u32 i;
-    ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
     bool autoneg = false;
+    s32 status = 0;
+    u32 pma_pmd_1g, link_mode, links_reg, i;
+    u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+    u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+    ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+    /* holds the value of AUTOC register at this current point in time */
+    u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+    /* holds the cached value of AUTOC register */
+    u32 orig_autoc = 0;
+    /* temporary variable used for comparison purposes */
+    u32 autoc = current_autoc;

     /* Check to see if speed passed in is supported. */
     status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -1046,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,

     /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
     if (hw->mac.orig_link_settings_stored)
-        autoc = hw->mac.orig_autoc;
+        orig_autoc = hw->mac.orig_autoc;
     else
-        autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+        orig_autoc = autoc;

-    orig_autoc = autoc;
-    start_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
     link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
     pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

@@ -1091,10 +1030,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        }
    }

-   if (autoc != start_autoc) {
+   if (autoc != current_autoc) {
        /* Restart link */
        status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
-       if (!status)
+       if (status)
            goto out;

        /* Only poll for autoneg to complete if specified to do so */
@@ -1267,7 +1206,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
         * Likewise if we support WoL we don't want change the
         * LMS state either.
         */
-       if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+       if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
            hw->wol_enabled)
            hw->mac.orig_autoc =
                (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
@@ -1277,7 +1216,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
            status = hw->mac.ops.prot_autoc_write(hw,
                                hw->mac.orig_autoc,
                                false);
-           if (!status)
+           if (status)
                goto reset_hw_out;
        }

@@ -1658,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 {

    u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-   u32 bucket_hash = 0;
+   u32 bucket_hash = 0, hi_dword = 0;
+   int i;

    /* Apply masks to input data */
-   input->dword_stream[0] &= input_mask->dword_stream[0];
-   input->dword_stream[1] &= input_mask->dword_stream[1];
-   input->dword_stream[2] &= input_mask->dword_stream[2];
-   input->dword_stream[3] &= input_mask->dword_stream[3];
-   input->dword_stream[4] &= input_mask->dword_stream[4];
-   input->dword_stream[5] &= input_mask->dword_stream[5];
-   input->dword_stream[6] &= input_mask->dword_stream[6];
-   input->dword_stream[7] &= input_mask->dword_stream[7];
-   input->dword_stream[8] &= input_mask->dword_stream[8];
-   input->dword_stream[9] &= input_mask->dword_stream[9];
-   input->dword_stream[10] &= input_mask->dword_stream[10];
+   for (i = 0; i <= 10; i++)
+       input->dword_stream[i] &= input_mask->dword_stream[i];

    /* record the flow_vm_vlan bits as they are a key part to the hash */
    flow_vm_vlan = ntohl(input->dword_stream[0]);

    /* generate common hash dword */
-   hi_hash_dword = ntohl(input->dword_stream[1] ^
-                         input->dword_stream[2] ^
-                         input->dword_stream[3] ^
-                         input->dword_stream[4] ^
-                         input->dword_stream[5] ^
-                         input->dword_stream[6] ^
-                         input->dword_stream[7] ^
-                         input->dword_stream[8] ^
-                         input->dword_stream[9] ^
-                         input->dword_stream[10]);
+   for (i = 1; i <= 10; i++)
+       hi_dword ^= input->dword_stream[i];
+   hi_hash_dword = ntohl(hi_dword);

    /* low dword is word swapped version of common */
    lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1705,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
    lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

    /* Process remaining 30 bit of the key */
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
-   IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+   for (i = 1; i <= 15; i++)
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

    /*
     * Limit hash to 13 bits since max bucket count is 8K.
@@ -2589,7 +2500,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
    .release_swfw_sync = &ixgbe_release_swfw_sync,
    .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
    .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
-   .mng_fw_enabled = &ixgbe_mng_enabled,
    .prot_autoc_read = &prot_autoc_read_82599,
    .prot_autoc_write = &prot_autoc_write_82599,
 };
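Several hunks above, and more below in ixgbe_common.c, flip "if (!ret_val)" to "if (ret_val)" around prot_autoc_read()/prot_autoc_write() and acquire_swfw_sync(). Those operations follow the usual kernel convention of returning 0 on success and a nonzero error code on failure, so the corrected code bails out only when the return value is nonzero. A small standalone sketch of that convention follows; demo_acquire_sem(), demo_protected_autoc_write() and DEMO_ERR_SWFW_SYNC are hypothetical stand-ins, not the driver's API.

/*
 * Standalone sketch of the 0-on-success convention behind the
 * "if (!ret_val)" -> "if (ret_val)" fixes; everything prefixed "demo_" or
 * "DEMO_" is invented for this example.
 */
#include <stdio.h>

#define DEMO_ERR_SWFW_SYNC (-13) /* stand-in error code */

/* Returns 0 when the semaphore is taken, negative on failure. */
static int demo_acquire_sem(int make_it_fail)
{
    return make_it_fail ? DEMO_ERR_SWFW_SYNC : 0;
}

static int demo_protected_autoc_write(int make_it_fail)
{
    int ret_val = demo_acquire_sem(make_it_fail);

    if (ret_val)            /* nonzero: semaphore NOT acquired, bail out */
        return DEMO_ERR_SWFW_SYNC;

    /* ... perform the protected register write, then release ... */
    return 0;
}

int main(void)
{
    printf("acquire ok:   %d\n", demo_protected_autoc_write(0));
    printf("acquire fail: %d\n", demo_protected_autoc_write(1));
    return 0;
}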
@@ -73,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
    bool link_up;

    switch (hw->phy.media_type) {
-   case ixgbe_media_type_fiber_fixed:
    case ixgbe_media_type_fiber:
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
        /* if link is down, assume supported */
@@ -143,11 +142,10 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
    case ixgbe_media_type_backplane:
        /* some MAC's need RMW protection on AUTOC */
        ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
-       if (!ret_val)
+       if (ret_val)
            goto out;

        /* only backplane uses autoc so fall though */
-   case ixgbe_media_type_fiber_fixed:
    case ixgbe_media_type_fiber:
        reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

@@ -648,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
  **/
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
 {
-    struct ixgbe_adapter *adapter = hw->back;
-    struct ixgbe_mac_info *mac = &hw->mac;
     u16 link_status;

     hw->bus.type = ixgbe_bus_type_pci_express;

     /* Get the negotiated link width and speed from PCI config space */
-    pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
-                         &link_status);
+    link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);

     hw->bus.width = ixgbe_convert_bus_width(link_status);
     hw->bus.speed = ixgbe_convert_bus_speed(link_status);

-    mac->ops.set_lan_id(hw);
+    hw->mac.ops.set_lan_id(hw);

     return 0;
 }
@@ -2398,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)

    switch (hw->phy.media_type) {
    /* Autoneg flow control on fiber adapters */
-   case ixgbe_media_type_fiber_fixed:
    case ixgbe_media_type_fiber:
        if (speed == IXGBE_LINK_SPEED_1GB_FULL)
            ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2440,12 +2434,10 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
  **/
 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
 {
-    struct ixgbe_adapter *adapter = hw->back;
     s16 devctl2;
     u32 pollcnt;

-    pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_CONTROL2,
-                         &devctl2);
+    devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
     devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;

     switch (devctl2) {
@@ -2723,14 +2715,14 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)

    if (!link_up) {
        ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
-       if (!ret_val)
+       if (ret_val)
            goto out;

        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
        autoc_reg |= IXGBE_AUTOC_FLU;

        ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
-       if (!ret_val)
+       if (ret_val)
            goto out;

        IXGBE_WRITE_FLUSH(hw);
@@ -2760,14 +2752,14 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
    bool locked = false;

    ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
-   if (!ret_val)
+   if (ret_val)
        goto out;

    autoc_reg &= ~IXGBE_AUTOC_FLU;
    autoc_reg |= IXGBE_AUTOC_AN_RESTART;

    ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
-   if (!ret_val)
+   if (ret_val)
        goto out;

    led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -111,6 +111,7 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
                                  u8 build, u8 ver);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);

 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
                              u32 headroom, int strategy);
@@ -408,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,

    switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
    /* return 0 to bypass going to ULD for DDPed data */
-   case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+   case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
        /* update length of DDPed data */
        ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
        rc = 0;
        break;
    /* unmap the sg list when FCPRSP is received */
-   case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+   case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
        dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                     ddp->sgc, DMA_FROM_DEVICE);
        ddp->err = ddp_err;
@@ -422,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
        ddp->sgc = 0;
        /* fall through */
    /* if DDP length is present pass it through to ULD */
-   case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+   case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
        /* update length of DDPed data */
        ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
        if (ddp->len)
            rc = ddp->len;
        break;
    /* no match will return as an error */
-   case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+   case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
    default:
        break;
    }
@@ -1539,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
    hdr.network += ETH_HLEN;

    /* handle any vlan tag if present */
-   if (protocol == __constant_htons(ETH_P_8021Q)) {
+   if (protocol == htons(ETH_P_8021Q)) {
        if ((hdr.network - data) > (max_len - VLAN_HLEN))
            return max_len;

@@ -1548,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
    }

    /* handle L3 protocols */
-   if (protocol == __constant_htons(ETH_P_IP)) {
+   if (protocol == htons(ETH_P_IP)) {
        if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
            return max_len;

@@ -1562,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
        /* record next protocol if header is present */
        if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
            nexthdr = hdr.ipv4->protocol;
-   } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+   } else if (protocol == htons(ETH_P_IPV6)) {
        if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
            return max_len;

@@ -1570,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
        nexthdr = hdr.ipv6->nexthdr;
        hlen = sizeof(struct ipv6hdr);
 #ifdef IXGBE_FCOE
-   } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+   } else if (protocol == htons(ETH_P_FCOE)) {
        if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
            return max_len;
        hlen = FCOE_HEADER_LEN;
@@ -4655,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
    struct ixgbe_hw *hw = &adapter->hw;
-   struct net_device *upper;
-   struct list_head *iter;
    int err;
    u32 ctrl_ext;

@@ -4698,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
            e_crit(drv, "Fan has stopped, replace the adapter\n");
    }

-   /* enable transmits */
-   netif_tx_start_all_queues(adapter->netdev);
-
-   /* enable any upper devices */
-   netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
-       if (netif_is_macvlan(upper)) {
-           struct macvlan_dev *vlan = netdev_priv(upper);
-
-           if (vlan->fwd_priv)
-               netif_tx_start_all_queues(upper);
-       }
-   }
-
    /* bring the link up in the watchdog, this could race with our first
     * link up interrupt but shouldn't be a problem */
    adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -6082,6 +6067,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 {
    struct net_device *netdev = adapter->netdev;
    struct ixgbe_hw *hw = &adapter->hw;
+   struct net_device *upper;
+   struct list_head *iter;
    u32 link_speed = adapter->link_speed;
    bool flow_rx, flow_tx;

@@ -6133,6 +6120,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
    netif_carrier_on(netdev);
    ixgbe_check_vf_rate_limit(adapter);

+   /* enable transmits */
+   netif_tx_wake_all_queues(adapter->netdev);
+
+   /* enable any upper devices */
+   rtnl_lock();
+   netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+       if (netif_is_macvlan(upper)) {
+           struct macvlan_dev *vlan = netdev_priv(upper);
+
+           if (vlan->fwd_priv)
+               netif_tx_wake_all_queues(upper);
+       }
+   }
+   rtnl_unlock();
+
    /* update the default user priority for VFs */
    ixgbe_update_default_up(adapter);

@@ -6520,7 +6522,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
    /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
    type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

-   if (first->protocol == __constant_htons(ETH_P_IP)) {
+   if (first->protocol == htons(ETH_P_IP)) {
        struct iphdr *iph = ip_hdr(skb);
        iph->tot_len = 0;
        iph->check = 0;
@@ -6580,12 +6582,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
    } else {
        u8 l4_hdr = 0;
        switch (first->protocol) {
-       case __constant_htons(ETH_P_IP):
+       case htons(ETH_P_IP):
            vlan_macip_lens |= skb_network_header_len(skb);
            type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
            l4_hdr = ip_hdr(skb)->protocol;
            break;
-       case __constant_htons(ETH_P_IPV6):
+       case htons(ETH_P_IPV6):
            vlan_macip_lens |= skb_network_header_len(skb);
            l4_hdr = ipv6_hdr(skb)->nexthdr;
            break;
@@ -6860,9 +6862,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
    hdr.network = skb_network_header(first->skb);

    /* Currently only IPv4/IPv6 with TCP is supported */
-   if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+   if ((first->protocol != htons(ETH_P_IPV6) ||
         hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-       (first->protocol != __constant_htons(ETH_P_IP) ||
+       (first->protocol != htons(ETH_P_IP) ||
         hdr.ipv4->protocol != IPPROTO_TCP))
        return;

@@ -6895,12 +6897,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
     * and write the value to source port portion of compressed dword
     */
    if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
-       common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+       common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
    else
        common.port.src ^= th->dest ^ first->protocol;
    common.port.dst ^= th->source;

-   if (first->protocol == __constant_htons(ETH_P_IP)) {
+   if (first->protocol == htons(ETH_P_IP)) {
        input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
        common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
    } else {
@@ -6966,8 +6968,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
     * or FIP and we have FCoE enabled on the adapter
     */
    switch (vlan_get_protocol(skb)) {
-   case __constant_htons(ETH_P_FCOE):
-   case __constant_htons(ETH_P_FIP):
+   case htons(ETH_P_FCOE):
+   case htons(ETH_P_FIP):
        adapter = netdev_priv(dev);

        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
@@ -7028,7 +7030,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
        tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
    /* else if it is a SW VLAN check the next protocol and store the tag */
-   } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+   } else if (protocol == htons(ETH_P_8021Q)) {
        struct vlan_hdr *vhdr, _vhdr;
        vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
        if (!vhdr)
@@ -7087,7 +7089,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,

 #ifdef IXGBE_FCOE
    /* setup tx offload for FCoE */
-   if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+   if ((protocol == htons(ETH_P_FCOE)) &&
        (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
        tso = ixgbe_fso(tx_ring, first, &hdr_len);
        if (tso < 0)
@@ -8038,10 +8040,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    if (err)
        goto err_sw_init;

-   /* Cache if MNG FW is up so we don't have to read the REG later */
-   if (hw->mac.ops.mng_fw_enabled)
-       hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
-
    /* Make it possible the adapter to be woken up via WOL */
    switch (adapter->hw.mac.type) {
    case ixgbe_mac_82599EB:
@@ -8292,7 +8290,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    ixgbe_dbg_adapter_init(adapter);

    /* Need link setup for MNG FW, else wait for IXGBE_UP */
-   if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+   if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
        hw->mac.ops.setup_link(hw,
            IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
            true);
@@ -66,9 +66,6 @@
 #define IXGBE_SFF_1GBASET_CAPABLE 0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
 #define IXGBE_SFF_ADDRESSING_MODE 0x4
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -80,7 +77,6 @@
 #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
 #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
-
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE 0x400
 #define IXGBE_TAF_ASM_PAUSE 0x800
@@ -1793,6 +1793,9 @@ enum {
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */

+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */
+
 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
 #define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
 #endif
@@ -2661,7 +2664,6 @@ enum ixgbe_sfp_type {
 enum ixgbe_media_type {
    ixgbe_media_type_unknown = 0,
    ixgbe_media_type_fiber,
-   ixgbe_media_type_fiber_fixed,
    ixgbe_media_type_fiber_qsfp,
    ixgbe_media_type_fiber_lco,
    ixgbe_media_type_copper,
@@ -2919,7 +2921,6 @@ struct ixgbe_mac_operations {
    s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
    s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
    s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
-   bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
 };

 struct ixgbe_phy_operations {
@@ -3050,7 +3051,6 @@ struct ixgbe_hw {
    bool adapter_stopped;
    bool force_full_reset;
    bool allow_unsupported_sfp;
-   bool mng_fw_enabled;
    bool wol_enabled;
 };

@@ -855,7 +855,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
    .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
    .get_thermal_sensor_data = NULL,
    .init_thermal_sensor_thresh = NULL,
-   .mng_fw_enabled = NULL,
    .prot_autoc_read = &prot_autoc_read_generic,
    .prot_autoc_write = &prot_autoc_write_generic,
 };
@@ -2857,12 +2857,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        u8 l4_hdr = 0;
        switch (skb->protocol) {
-       case __constant_htons(ETH_P_IP):
+       case htons(ETH_P_IP):
            vlan_macip_lens |= skb_network_header_len(skb);
            type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
            l4_hdr = ip_hdr(skb)->protocol;
            break;
-       case __constant_htons(ETH_P_IPV6):
+       case htons(ETH_P_IPV6):
            vlan_macip_lens |= skb_network_header_len(skb);
            l4_hdr = ipv6_hdr(skb)->nexthdr;
            break;