Commit 7a3765ed authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "One last pull request before heading to Vancouver for LPC, here we have:

   1) Don't forget to free VSI contexts during ice driver unload, from
      Victor Raj.

   2) Don't forget napi delete calls during device remove in ice driver,
      from Dave Ertman.

   3) Don't request VLAN tag insertion of ibmvnic device when SKB
      doesn't have VLAN tags at all.

   4) IPV4 frag handling code has to accommodate the situation where two
      threads try to insert the same fragment into the hash table at the
      same time (a sketch of the lookup-or-insert pattern follows this
      list). From Eric Dumazet.

   5) Relatedly, don't use protocol ports to separate flows when
      dissecting fragmented frames, also from Eric Dumazet.

   6) Memory leaks in qed driver, from Denis Bolotin.

   7) Correct valid MTU range in smsc95xx driver, from Stefan Wahren.

   8) Validate cls_flower nested policies properly, from Jakub Kicinski.

   9) Clearing of stats counters in mv88e6xxx driver doesn't retain
      important bits in the G1_STATS_OP register causing the chip to
      hang. Fix from Andrew Lunn"
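
A note on item 4: when two CPUs race to create a queue for the same
fragment key, the loser of the hash insert must adopt the winner's queue
instead of failing the whole frame. A minimal sketch of that
lookup-or-insert pattern, with a simplified frag_queue type standing in
for the real inet_frag_queue (names here are illustrative, not the exact
upstream fix):

    struct frag_queue {
            struct rhash_head node;
            refcount_t refcnt;
            /* ... key, fragment list, timer ... */
    };

    static struct frag_queue *frag_find_or_create(struct rhashtable *ht,
                                                  const void *key,
                                                  struct frag_queue *newq,
                                                  struct rhashtable_params params)
    {
            struct frag_queue *prev;

            /* Atomically: insert newq, or return whoever beat us to it */
            prev = rhashtable_lookup_get_insert_key(ht, key, &newq->node,
                                                    params);
            if (!prev)
                    return newq;            /* we won the race */
            if (IS_ERR(prev))
                    return NULL;            /* table error; caller frees newq */

            refcount_inc(&prev->refcnt);    /* lost the race: reuse winner */
            return prev;                    /* caller frees newq */
    }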

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  act_mirred: clear skb->tstamp on redirect
  net: dsa: mv88e6xxx: Fix clearing of stats counters
  tipc: fix link re-establish failure
  net: sched: cls_flower: validate nested enc_opts_policy to avoid warning
  net: mvneta: correct typo
  flow_dissector: do not dissect l4 ports for fragments
  net: qualcomm: rmnet: Fix incorrect assignment of real_dev
  net: aquantia: allow rx checksum offload configuration
  net: aquantia: invalid checksumm offload implementation
  net: aquantia: fixed enable unicast on 32 macvlan
  net: aquantia: fix potential IOMMU fault after driver unbind
  net: aquantia: synchronized flow control between mac/phy
  net: smsc95xx: Fix MTU range
  net: stmmac: Fix RX packet size > 8191
  qed: Fix potential memory corruption
  qed: Fix SPQ entries not returned to pool in error flows
  qed: Fix blocking/unlimited SPQ entries leak
  qed: Fix memory/entry leak in qed_init_sp_request()
  inet: frags: better deal with smp races
  net: hns3: bugfix for not checking return value
  ...
parents e12e00e3 7236ead1
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
 	if (err)
 		return err;
 
+	/* Keep the histogram mode bits */
+	val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
 	val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
 
 	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
...
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
 				       struct ethtool_pauseparam *pause)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	u32 fc = aq_nic->aq_nic_cfg.flow_control;
 
 	pause->autoneg = 0;
 
-	if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
-		pause->rx_pause = 1;
-	if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
-		pause->tx_pause = 1;
+	pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+	pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
 }
 
 static int aq_ethtool_set_pauseparam(struct net_device *ndev,
...
@@ -204,6 +204,10 @@ struct aq_hw_ops {
 	int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
+	int (*hw_set_offload)(struct aq_hw_s *self,
+			      struct aq_nic_cfg_s *aq_nic_cfg);
+
+	int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
 };
 
 struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
 	int (*update_stats)(struct aq_hw_s *self);
 
+	u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
 	int (*set_flow_control)(struct aq_hw_s *self);
 
 	int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
...
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 	struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
 	bool is_lro = false;
+	int err = 0;
+
+	aq_cfg->features = features;
 
-	if (aq_cfg->hw_features & NETIF_F_LRO) {
+	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
 		is_lro = features & NETIF_F_LRO;
 
 		if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
 		}
 	}
 
+	if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+							aq_cfg);
 
-	return 0;
+	return err;
 }
 
 static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
...
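
The (old ^ new) & NETIF_F_RXCSUM test in the hunk above is the usual
netdev idiom for reacting only to feature bits that actually changed:
XOR of the old and new masks yields exactly the toggled bits. In
isolation (reprogram_rx_csum() is a hypothetical stand-in for the
driver's hw_set_offload op):

    netdev_features_t changed = ndev->features ^ features;

    if (changed & NETIF_F_RXCSUM)
            err = reprogram_rx_csum(ndev);  /* only touch hw on a toggle */
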
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
 	}
 
 	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
-	cfg->hw_features = cfg->aq_hw_caps->hw_features;
+	cfg->features = cfg->aq_hw_caps->hw_features;
 }
 
 static int aq_nic_update_link_status(struct aq_nic_s *self)
 {
 	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+	u32 fc = 0;
 
 	if (err)
 		return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 			AQ_CFG_DRV_NAME, self->link_status.mbps,
 			self->aq_hw->aq_link_status.mbps);
 		aq_nic_update_interrupt_moderation_settings(self);
+
+		/* Driver has to update flow control settings on RX block
+		 * on any link event.
+		 * We should query FW whether it negotiated FC.
+		 */
+		if (self->aq_fw_ops->get_flow_control)
+			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+		if (self->aq_hw_ops->hw_set_fc)
+			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
 	}
 
 	self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 		}
 	}
 
-	if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
 		packet_filter |= IFF_MULTICAST;
 		self->mc_list.count = i;
 		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     Pause);
 
-	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+	/* Asym is when either RX or TX, but not both */
+	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
+	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
 		ethtool_link_ksettings_add_link_mode(cmd, advertising,
 						     Asym_Pause);
...
@@ -23,7 +23,7 @@ struct aq_vec_s;
 
 struct aq_nic_cfg_s {
 	const struct aq_hw_caps_s *aq_hw_caps;
-	u64 hw_features;
+	u64 features;
 	u32 rxds;		/* rx ring size, descriptors # */
 	u32 txds;		/* tx ring size, descriptors # */
 	u32 vecs;		/* vecs==allocated irqs */
...
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
 	return !!budget;
 }
 
+static void aq_rx_checksum(struct aq_ring_s *self,
+			   struct aq_ring_buff_s *buff,
+			   struct sk_buff *skb)
+{
+	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+		return;
+
+	if (unlikely(buff->is_cso_err)) {
+		++self->stats.rx.errors;
+		skb->ip_summed = CHECKSUM_NONE;
+		return;
+	}
+	if (buff->is_ip_cso) {
+		__skb_incr_checksum_unnecessary(skb);
+		if (buff->is_udp_cso || buff->is_tcp_cso)
+			__skb_incr_checksum_unnecessary(skb);
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+}
+
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 int aq_ring_rx_clean(struct aq_ring_s *self,
 		     struct napi_struct *napi,
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		}
 
 		skb->protocol = eth_type_trans(skb, ndev);
-		if (unlikely(buff->is_cso_err)) {
-			++self->stats.rx.errors;
-			skb->ip_summed = CHECKSUM_NONE;
-		} else {
-			if (buff->is_ip_cso) {
-				__skb_incr_checksum_unnecessary(skb);
-				if (buff->is_udp_cso || buff->is_tcp_cso)
-					__skb_incr_checksum_unnecessary(skb);
-			} else {
-				skb->ip_summed = CHECKSUM_NONE;
-			}
-		}
+
+		aq_rx_checksum(self, buff, skb);
+
 		skb_set_hash(skb, buff->rss_hash,
 			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
...
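
For reference, __skb_incr_checksum_unnecessary() (include/linux/skbuff.h)
is what lets the driver report two validated checksums, IP header first
and then TCP/UDP: the first call marks the skb CHECKSUM_UNNECESSARY, and
each further call bumps csum_level. It is roughly:

    static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
    {
            if (skb->ip_summed == CHECKSUM_NONE) {
                    /* first validated checksum */
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                    skb->csum_level = 0;
            } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                    /* one more validated (inner) checksum */
                    if (skb->csum_level < SKB_MAX_CSUM_LEVELS)
                            skb->csum_level++;
            }
    }
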
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
 	return err;
 }
 
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+	hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+	return 0;
+}
+
 static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 {
 	u32 tc = 0U;
 	u32 buff_size = 0U;
 	unsigned int i_priority = 0U;
-	bool is_rx_flow_control = false;
 
 	/* TPS Descriptor rate init */
 	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 
 	/* QoS Rx buf size per TC */
 	tc = 0;
-	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
 	buff_size = HW_ATL_B0_RXBUF_MAX;
 
 	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 						   (buff_size *
 						   (1024U / 32U) * 50U) /
 						   100U, tc);
-	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+	hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
 
 	/* QoS 802.1p priority -> TC mapping */
 	for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* RX checksums offloads*/
-	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
-	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+							  NETIF_F_RXCSUM));
+	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+						       NETIF_F_RXCSUM));
 
 	/* LSO offloads*/
 	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
 			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
 
-		unsigned int is_err = 1U;
 		unsigned int is_rx_check_sum_enabled = 0U;
 		unsigned int pkt_type = 0U;
+		u8 rx_stat = 0U;
 
 		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
 			break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 		buff = &ring->buff_ring[ring->hw_head];
 
-		is_err = (0x0000003CU & rxd_wb->status);
+		rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
 		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
-		is_err &= ~0x20U; /* exclude validity bit */
 
 		pkt_type = 0xFFU & (rxd_wb->type >> 4);
 
-		if (is_rx_check_sum_enabled) {
-			if (0x0U == (pkt_type & 0x3U))
-				buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+		if (is_rx_check_sum_enabled & BIT(0) &&
+		    (0x0U == (pkt_type & 0x3U)))
+			buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
 
+		if (is_rx_check_sum_enabled & BIT(1)) {
 			if (0x4U == (pkt_type & 0x1CU))
-				buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+				buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+						   !!(rx_stat & BIT(3));
 			else if (0x0U == (pkt_type & 0x1CU))
-				buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
-			/* Checksum offload workaround for small packets */
-			if (rxd_wb->pkt_len <= 60) {
-				buff->is_ip_cso = 0U;
-				buff->is_cso_err = 0U;
-			}
-		}
-
-		is_err &= ~0x18U;
+				buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+						   !!(rx_stat & BIT(3));
+		}
+
+		buff->is_cso_err = !!(rx_stat & 0x6);
+		/* Checksum offload workaround for small packets */
+		if (unlikely(rxd_wb->pkt_len <= 60)) {
+			buff->is_ip_cso = 0U;
+			buff->is_cso_err = 0U;
+		}
 
 		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
 
-		if (is_err || rxd_wb->type & 0x1000U) {
-			/* status error or DMA error */
+		if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+			/* MAC error or DMA error */
 			buff->is_error = 1U;
 		} else {
 			if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
 	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+	/* Invalidate Descriptor Cache to prevent writing to the cached
+	 * descriptors and to the data pointer of those descriptors
+	 */
+	hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
 	return aq_hw_err_from_flags(self);
 }
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
 	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
 	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
+	.hw_set_offload              = hw_atl_b0_hw_offload_set,
+	.hw_set_fc                   = hw_atl_b0_set_fc,
 };
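
The rx_stat value extracted above is bits 2..5 of the descriptor
write-back status word. The bit meanings implied by the new code (the
macro names below are ours, for illustration only):

    /* rx_stat = (rxd_wb->status & 0x0000003CU) >> 2 */
    #define RXD_STAT_MAC_ERR      BIT(0)  /* MAC-level receive error      */
    #define RXD_STAT_IPV4_CSO_ERR BIT(1)  /* IPv4 header checksum bad     */
    #define RXD_STAT_L4_CSO_ERR   BIT(2)  /* TCP/UDP checksum bad         */
    #define RXD_STAT_L4_CSO_DONE  BIT(3)  /* TCP/UDP checksum was checked */
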
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
 			    HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+			    HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+			    HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+			    init);
+}
+
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 rx_pkt_buff_size_per_tc, u32 buffer)
 {
...
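
aq_hw_write_reg_bit() used above is the driver's masked read-modify-write
register accessor; conceptually it does the following (a sketch of the
semantics, not the driver's exact implementation):

    static void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr,
                                    u32 msk, u32 shift, u32 val)
    {
            u32 reg = aq_hw_read_reg(aq_hw, addr);  /* read        */

            reg &= ~msk;                            /* clear field */
            reg |= (val << shift) & msk;            /* modify      */
            aq_hw_write_reg(aq_hw, addr, reg);      /* write back  */
    }
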
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 rx_pkt_buff_size_per_tc,
 					    u32 buffer);
 
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
 				      u32 buffer);
...
@@ -293,6 +293,24 @@
 /* default value of bitfield desc{d}_reset */
 #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
 
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"
...
@@ -30,6 +30,8 @@
 #define HW_ATL_FW2X_MPI_STATE_ADDR	0x370
 #define HW_ATL_FW2X_MPI_STATE2_ADDR	0x374
 
+#define HW_ATL_FW2X_CAP_PAUSE            BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE       BIT(CAPS_HI_ASYMMETRIC_PAUSE)
 #define HW_ATL_FW2X_CAP_SLEEP_PROXY      BIT(CAPS_HI_SLEEP_PROXY)
 #define HW_ATL_FW2X_CAP_WOL              BIT(CAPS_HI_WOL)
 
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
 	return 0;
 }
 
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+	u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
+	if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+		if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+			*fcmode = AQ_NIC_FC_RX;
+		else
+			*fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+	else
+		if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+			*fcmode = AQ_NIC_FC_TX;
+		else
+			*fcmode = 0;
+
+	return 0;
+}
+
 const struct aq_fw_ops aq_fw_2x_ops = {
 	.init               = aq_fw2x_init,
 	.deinit             = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
 	.set_eee_rate       = aq_fw2x_set_eee_rate,
 	.get_eee_rate       = aq_fw2x_get_eee_rate,
 	.set_flow_control   = aq_fw2x_set_flow_control,
+	.get_flow_control   = aq_fw2x_get_flow_control
 };
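
The decode above follows the usual IEEE 802.3 pause resolution: the
PAUSE/ASYM_PAUSE capability bits reported by firmware map onto the RX/TX
flow-control directions as follows.

    PAUSE  ASYM_PAUSE  |  resolved fcmode
    -----  ----------  |  ------------------------------
      1        1       |  AQ_NIC_FC_RX                (RX only)
      1        0       |  AQ_NIC_FC_RX | AQ_NIC_FC_TX (both)
      0        1       |  AQ_NIC_FC_TX                (TX only)
      0        0       |  0                           (off)

This is also why the ethtool hunk earlier advertises Asym_Pause exactly
when the RX and TX settings differ.
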
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	/* Hardware table is only clear when pf resets */
 	if (!(handle->flags & HNAE3_SUPPORT_VF)) {
 		ret = hns3_restore_vlan(netdev);
-		return ret;
+		if (ret)
+			return ret;
 	}
 
 	ret = hns3_restore_fd_rules(netdev);
...
@@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 
-	if (adapter->vlan_header_insertion) {
+	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
 		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
 		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
 	}
...
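
skb_vlan_tag_present() is true only when the skb carries an out-of-band
VLAN tag, so the fix skips IBMVNIC_TX_VLAN_INSERT for untagged traffic.
On kernels of this vintage it is roughly the following flag test inside
vlan_tci (include/linux/if_vlan.h):

    #define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
    #define skb_vlan_tag_get(__skb)     ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
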
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 			  NETIF_F_GSO_GRE		|
 			  NETIF_F_GSO_GRE_CSUM		|
 			  NETIF_F_GSO_PARTIAL		|
+			  NETIF_F_GSO_IPXIP4		|
+			  NETIF_F_GSO_IPXIP6		|
 			  NETIF_F_GSO_UDP_TUNNEL	|
 			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
 			  NETIF_F_SCTP_CRC		|
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	/* record features VLANs can make use of */
 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
-	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-		netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
 	hw_features = hw_enc_features		|
 		      NETIF_F_HW_VLAN_CTAG_TX	|
 		      NETIF_F_HW_VLAN_CTAG_RX;
 
+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
 	netdev->hw_features |= hw_features;
 
 	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
...
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
 #define ICE_MIN_INTR_PER_VF		(ICE_MIN_QS_PER_VF + 1)
 #define ICE_DFLT_INTR_PER_VF		(ICE_DFLT_QS_PER_VF + 1)
 #define ICE_MAX_RESET_WAIT		20
+
+#define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)
 
 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
 	u64 tx_linearize;
 	DECLARE_BITMAP(state, __ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	unsigned int current_netdev_flags;
 	u32 tx_restart;
 	u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
 
 #endif /* _ICE_H_ */
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
 	/* Attempt to disable FW logging before shutting down control queues */
 	ice_cfg_fw_log(hw, false);
 	ice_shutdown_all_ctrlq(hw);
+
+	/* Clear VSI contexts if not already cleared */
+	ice_clear_all_vsi_ctx(hw);
 }
 
 /**
...
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 	}
 
 	if (!test_bit(__ICE_DOWN, pf->state)) {
-		/* Give it a little more time to try to come back */
+		/* Give it a little more time to try to come back. If still
+		 * down, restart autoneg link or reinitialize the interface.
+		 */
 		msleep(75);
 		if (!test_bit(__ICE_DOWN, pf->state))
 			return ice_nway_reset(netdev);
+
+		ice_down(vsi);
+		ice_up(vsi);
 	}
 
 	return err;
...
@@ -242,6 +242,8 @@
 #define GLNVM_ULD			0x000B6008
 #define GLNVM_ULD_CORER_DONE_M		BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M		BIT(4)
+#define GLPCI_CNF2			0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M	BIT(1)
 #define PF_FUNC_RID			0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S		0
 #define PF_FUNC_RID_FUNC_NUM_M		ICE_M(0x7, 0)
...
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
 	status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
 	if (status) {
 		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
-			   ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
 			   vsi->back->hw.adminq.sq_last_status);
 		goto err_out;
 	}
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	 * on this wq
 	 */
 	if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+		ice_napi_del(vsi);
 		unregister_netdev(vsi->netdev);
 		free_netdev(vsi->netdev);
 		vsi->netdev = NULL;
...