Commit 050cdc6c authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) ICE, E1000, IGB, IXGBE, and I40E bug fixes from the Intel folks.

 2) Better fix for AB-BA deadlock in packet scheduler code, from Cong
    Wang. (An illustrative AB-BA sketch follows this list.)

 3) bpf sockmap fixes (zero sized key handling, etc.) from Daniel
    Borkmann.

 4) Send zero IPID in TCP resets and SYN-RECV state ACKs, to prevent
    attackers using it as a side-channel. From Eric Dumazet.

 5) Memory leak in mediatek bluetooth driver, from Gustavo A. R. Silva.

 6) Hook up rt->dst.input of ipv6 anycast routes properly, from Hangbin
    Liu.

 7) hns and hns3 bug fixes from Huazhong Tan.

 8) Fix RIF leak in mlxsw driver, from Ido Schimmel.

 9) iova range check fix in vhost, from Jason Wang.

10) Fix hang in do_tcp_sendpages() with tls, from John Fastabend.

11) More r8152 chips need to disable RX aggregation, from Kai-Heng Feng.

12) Memory exposure in TCA_U32_SEL handling, from Kees Cook.

13) TCP BBR congestion control fixes from Kevin Yang.

14) hv_netvsc, ignore non-PCI devices, from Stephen Hemminger.

15) qed driver fixes from Tomer Tayar.
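
An illustrative aside on item 2: an AB-BA deadlock is the classic
lock-ordering bug, where two contexts take the same pair of locks in
opposite orders. The sketch below is generic userspace C with invented
names, not the packet scheduler code itself; the cure in both cases is a
single global acquisition order.

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Context 1 takes A, then B. */
static void *ctx1(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock_a);
    pthread_mutex_lock(&lock_b);    /* blocks if ctx2 already holds B */
    pthread_mutex_unlock(&lock_b);
    pthread_mutex_unlock(&lock_a);
    return NULL;
}

/* Context 2 takes B, then A. With unlucky timing each context holds the
 * lock the other needs, and neither can ever proceed.
 */
static void *ctx2(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock_b);
    pthread_mutex_lock(&lock_a);    /* opposite order: deadlock window */
    pthread_mutex_unlock(&lock_a);
    pthread_mutex_unlock(&lock_b);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, ctx1, NULL);
    pthread_create(&t2, NULL, ctx2, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}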

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (77 commits)
  net: sched: Fix memory exposure from short TCA_U32_SEL
  qed: fix spelling mistake "comparsion" -> "comparison"
  vhost: correctly check the iova range when waking virtqueue
  qlge: Fix netdev features configuration.
  net: macb: do not disable MDIO bus at open/close time
  Revert "net: stmmac: fix build failure due to missing COMMON_CLK dependency"
  net: macb: Fix regression breaking non-MDIO fixed-link PHYs
  mlxsw: spectrum_switchdev: Do not leak RIFs when removing bridge
  i40e: fix condition of WARN_ONCE for stat strings
  i40e: Fix for Tx timeouts when interface is brought up if DCB is enabled
  ixgbe: fix driver behaviour after issuing VFLR
  ixgbe: Prevent unsupported configurations with XDP
  ixgbe: Replace GFP_ATOMIC with GFP_KERNEL
  igb: Replace mdelay() with msleep() in igb_integrated_phy_loopback()
  igb: Replace GFP_ATOMIC with GFP_KERNEL in igb_sw_init()
  igb: Use an advanced ctx descriptor for launchtime
  e1000: ensure to free old tx/rx rings in set_ringparam()
  e1000: check on netif_running() before calling e1000_up()
  ixgb: use dma_zalloc_coherent instead of allocator/memset
  ice: Trivial formatting fixes
  ...
parents 908946c4 98c8f125
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
 	depends on BT_HCIUART
 	depends on BT_HCIUART_SERDEV
 	depends on GPIOLIB
+	depends on ACPI
 	select BT_HCIUART_3WIRE
 	select BT_RTL
 	help
......
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
 	fw_size = fw->size;
 
 	/* The size of patch header is 30 bytes, should be skip */
-	if (fw_size < 30)
-		return -EINVAL;
+	if (fw_size < 30) {
+		err = -EINVAL;
+		goto free_fw;
+	}
 
 	fw_size -= 30;
 	fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
 		fw_ptr += dlen;
 	}
 
+free_fw:
 	release_firmware(fw);
-
 	return err;
 }
......
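The mediatek hunk above is the kernel's standard goto-cleanup idiom: once
the firmware blob is held (presumably via an earlier request_firmware()
call, not shown), error paths must jump to a single label that releases it
instead of returning directly, which is what leaked the blob. A minimal
generic sketch of the idiom, with invented names:

#include <errno.h>
#include <stdlib.h>

/* Stand-in for a held resource such as struct firmware. */
struct blob {
    unsigned char *data;
    size_t size;
};

static int consume_blob(struct blob *b)
{
    int err = 0;

    /* A validation failure must not return directly: the blob is
     * already held and would leak. Route it through the release label.
     */
    if (b->size < 30) {
        err = -EINVAL;
        goto free_blob;
    }

    /* ... consume b->data + 30 ... */

free_blob:
    free(b->data);    /* the single release point covers every path */
    return err;
}

int main(void)
{
    struct blob b = { calloc(1, 16), 16 };

    return consume_blob(&b) ? 1 : 0;
}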
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 				 struct tcf_exts *tc_exts)
 {
 	const struct tc_action *tc_act;
-	LIST_HEAD(tc_actions);
-	int rc;
+	int i, rc;
 
 	if (!tcf_exts_has_actions(tc_exts)) {
 		netdev_info(bp->dev, "no actions");
 		return -EINVAL;
 	}
 
-	tcf_exts_to_list(tc_exts, &tc_actions);
-	list_for_each_entry(tc_act, &tc_actions, list) {
+	tcf_exts_for_each_action(i, tc_act, tc_exts) {
 		/* Drop action */
 		if (is_tcf_gact_shot(tc_act)) {
 			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
......
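The bnxt hunk above (and the two cxgb4 hunks that follow) all make the same
conversion: instead of copying the actions into a temporary LIST_HEAD on
the stack and walking it with list_for_each_entry(), they iterate the
extension's action array in place by index. A simplified model of such an
indexed for-each macro (not the kernel's exact tcf_exts_for_each_action()
definition):

#include <stdio.h>

struct action {
    int id;
};

/* Visit each non-NULL slot of a fixed-size pointer array in order,
 * stopping at the first NULL entry.
 */
#define for_each_action(i, a, acts, max) \
    for ((i) = 0; (i) < (max) && ((a) = (acts)[(i)]) != NULL; (i)++)

int main(void)
{
    struct action a0 = { 1 }, a1 = { 2 };
    struct action *acts[4] = { &a0, &a1, NULL, NULL };
    struct action *a;
    int i;

    for_each_action(i, a, acts, 4)
        printf("action %d\n", a->id);
    return 0;
}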
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
 	if (np) {
 		if (of_phy_is_fixed_link(np)) {
-			if (of_phy_register_fixed_link(np) < 0) {
-				dev_err(&bp->pdev->dev,
-					"broken fixed-link specification\n");
-				return -ENODEV;
-			}
 			bp->phy_node = of_node_get(np);
 		} else {
 			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
 {
 	struct macb_platform_data *pdata;
 	struct device_node *np;
-	int err;
+	int err = -ENXIO;
 
 	/* Enable management port */
 	macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
 	np = bp->pdev->dev.of_node;
-	if (pdata)
-		bp->mii_bus->phy_mask = pdata->phy_mask;
+	if (np && of_phy_is_fixed_link(np)) {
+		if (of_phy_register_fixed_link(np) < 0) {
+			dev_err(&bp->pdev->dev,
+				"broken fixed-link specification %pOF\n", np);
+			goto err_out_free_mdiobus;
+		}
+
+		err = mdiobus_register(bp->mii_bus);
+	} else {
+		if (pdata)
+			bp->mii_bus->phy_mask = pdata->phy_mask;
+
+		err = of_mdiobus_register(bp->mii_bus, np);
+	}
 
-	err = of_mdiobus_register(bp->mii_bus, np);
 	if (err)
-		goto err_out_free_mdiobus;
+		goto err_out_free_fixed_link;
 
 	err = macb_mii_probe(bp->dev);
 	if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
 err_out_unregister_bus:
 	mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
 	if (np && of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 err_out_free_mdiobus:
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
 {
 	struct macb_queue *queue;
 	unsigned int q;
+	u32 ctrl = macb_readl(bp, NCR);
 
 	/* Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
 	 */
-	macb_writel(bp, NCR, 0);
+	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
 
 	/* Clear the stats registers (XXX: Update stats first?) */
-	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+	ctrl |= MACB_BIT(CLRSTAT);
+
+	macb_writel(bp, NCR, ctrl);
 
 	/* Clear all status flags */
 	macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
 	}
 
 	/* Enable TX and RX */
-	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 }
 
 /* The hash address register is 64 bits long and takes up two
......
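Both NCR hunks above replace blind writes to the network control register
with read-modify-write sequences, so unrelated control bits (notably MPE,
the management-port-enable bit the MDIO bus depends on) survive a reset or
re-enable. The general shape of the pattern, sketched against an invented
memory-mapped register:

#include <stdint.h>
#include <stdio.h>

#define CTRL_RX_ENABLE	(1u << 2)	/* example bit positions, not macb's */
#define CTRL_TX_ENABLE	(1u << 3)
#define CTRL_MGMT_EN	(1u << 4)
#define CTRL_CLR_STATS	(1u << 5)

static volatile uint32_t ctrl_reg;	/* stand-in for an MMIO register */

static void reset_datapath(void)
{
    uint32_t ctrl = ctrl_reg;	/* read the current value ... */

    ctrl &= ~(CTRL_RX_ENABLE | CTRL_TX_ENABLE);	/* ... touch only our bits ... */
    ctrl |= CTRL_CLR_STATS;
    ctrl_reg = ctrl;		/* ... write back, preserving the rest */
}

int main(void)
{
    ctrl_reg = CTRL_MGMT_EN | CTRL_RX_ENABLE | CTRL_TX_ENABLE;
    reset_datapath();
    /* CTRL_MGMT_EN is still set; a plain write of 0 would have lost it. */
    printf("ctrl = 0x%x\n", (unsigned)ctrl_reg);
    return 0;
}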
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
 				       struct ch_filter_specification *fs)
 {
 	const struct tc_action *a;
-	LIST_HEAD(actions);
+	int i;
 
-	tcf_exts_to_list(cls->exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, cls->exts) {
 		if (is_tcf_gact_ok(a)) {
 			fs->action = FILTER_PASS;
 		} else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
 	bool act_redir = false;
 	bool act_pedit = false;
 	bool act_vlan = false;
-	LIST_HEAD(actions);
+	int i;
 
-	tcf_exts_to_list(cls->exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, cls->exts) {
 		if (is_tcf_gact_ok(a)) {
 			/* Do nothing */
 		} else if (is_tcf_gact_shot(a)) {
......
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
 	unsigned int num_actions = 0;
 	const struct tc_action *a;
 	struct tcf_exts *exts;
-	LIST_HEAD(actions);
+	int i;
 
 	exts = cls->knode.exts;
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
 
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
+	tcf_exts_for_each_action(i, a, exts) {
 		/* Don't allow more than one action per rule. */
 		if (num_actions)
 			return -EINVAL;
......
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
 	/* priv data for the desc, e.g. skb when use with ip stack*/
 	void *priv;
-	u16 page_offset;
-	u16 reuse_flag;
+	u32 page_offset;
+	u32 length;	/* length of the buffer */
 
-	u16 length;	/* length of the buffer */
+	u16 reuse_flag;
 
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
......
@@ -406,113 +406,13 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
 	return NETDEV_TX_BUSY;
 }
 
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
-					unsigned int max_size)
-{
-	unsigned char *network;
-	u8 hlen;
-
-	/* this should never happen, but better safe than sorry */
-	if (max_size < ETH_HLEN)
-		return max_size;
-
-	/* initialize network frame pointer */
-	network = data;
-
-	/* set first protocol and move network header forward */
-	network += ETH_HLEN;
-
-	/* handle any vlan tag if present */
-	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
-		== HNS_RX_FLAG_VLAN_PRESENT) {
-		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
-			return max_size;
-
-		network += VLAN_HLEN;
-	}
-
-	/* handle L3 protocols */
-	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-		== HNS_RX_FLAG_L3ID_IPV4) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct iphdr)))
-			return max_size;
-
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (network[0] & 0x0F) << 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct iphdr))
-			return network - data;
-
-		/* record next protocol if header is present */
-	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-		== HNS_RX_FLAG_L3ID_IPV6) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct ipv6hdr)))
-			return max_size;
-
-		/* record next protocol */
-		hlen = sizeof(struct ipv6hdr);
-	} else {
-		return network - data;
-	}
-
-	/* relocate pointer to start of L4 header */
-	network += hlen;
-
-	/* finally sort out TCP/UDP */
-	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-		== HNS_RX_FLAG_L4ID_TCP) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct tcphdr)))
-			return max_size;
-
-		/* access doff as a u8 to avoid unaligned access on ia64 */
-		hlen = (network[12] & 0xF0) >> 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct tcphdr))
-			return network - data;
-
-		network += hlen;
-	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-		== HNS_RX_FLAG_L4ID_UDP) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct udphdr)))
-			return max_size;
-
-		network += sizeof(struct udphdr);
-	}
-
-	/* If everything has gone correctly network should be the
-	 * data section of the packet and will be the end of the header.
-	 * If not then it probably represents the end of the last recognized
-	 * header.
-	 */
-	if ((typeof(max_size))(network - data) < max_size)
-		return network - data;
-	else
-		return max_size;
-}
-
 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
 			       struct hnae_ring *ring, int pull_len,
 			       struct hnae_desc_cb *desc_cb)
 {
 	struct hnae_desc *desc;
-	int truesize, size;
+	u32 truesize;
+	int size;
 	int last_offset;
 	bool twobufs;
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
 	}
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-			size - pull_len, truesize - pull_len);
+			size - pull_len, truesize);
 
 	/* avoid re-using remote pages,flag default unreuse */
 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 	} else {
 		ring->stats.seg_pkt_cnt++;
 
-		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+		pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
 		memcpy(__skb_put(skb, pull_len), va,
 		       ALIGN(pull_len, sizeof(long)));
......
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_desc_cb *desc_cb)
 {
 	struct hns3_desc *desc;
-	int truesize, size;
+	u32 truesize;
+	int size;
 	int last_offset;
 	bool twobufs;
......
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
 	/* priv data for the desc, e.g. skb when use with ip stack*/
 	void *priv;
-	u16 page_offset;
-	u16 reuse_flag;
 
+	u32 page_offset;
 	u32 length;	/* length of the buffer */
 
+	u16 reuse_flag;
+
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
 };
......
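The hnae and hns3 descriptor-cb hunks above widen page_offset (and, for
hns, length) to u32 so that large-page configurations cannot overflow a
16-bit value, and regroup the remaining u16 members so the wider fields
stay naturally aligned without padding holes. The effect of member
ordering can be checked with stand-in structs (illustrative only, not the
driver's real layout rules):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cb_interleaved {		/* 16- and 32-bit members interleaved */
    uint32_t page_offset;
    uint16_t reuse_flag;
    uint32_t length;		/* 2-byte hole before this member */
    uint16_t type;
};				/* typically 16 bytes on common ABIs */

struct cb_grouped {		/* wide members first, u16s grouped */
    uint32_t page_offset;
    uint32_t length;
    uint16_t reuse_flag;
    uint16_t type;
};				/* typically 12 bytes, no interior hole */

int main(void)
{
    printf("interleaved: %zu bytes, length at offset %zu\n",
           sizeof(struct cb_interleaved),
           offsetof(struct cb_interleaved, length));
    printf("grouped:     %zu bytes, length at offset %zu\n",
           sizeof(struct cb_grouped),
           offsetof(struct cb_grouped, length));
    return 0;
}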
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
 		adapter->tx_ring = tx_old;
 		e1000_free_all_rx_resources(adapter);
 		e1000_free_all_tx_resources(adapter);
-		kfree(tx_old);
-		kfree(rx_old);
 		adapter->rx_ring = rxdr;
 		adapter->tx_ring = txdr;
 		err = e1000_up(adapter);
 		if (err)
 			goto err_setup;
 	}
+	kfree(tx_old);
+	kfree(rx_old);
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return 0;
@@ -644,7 +644,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
 err_alloc_rx:
 	kfree(txdr);
 err_alloc_tx:
-	e1000_up(adapter);
+	if (netif_running(adapter->netdev))
+		e1000_up(adapter);
 err_setup:
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return err;
......
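Two separate fixes meet in the e1000 hunk above: the kfree() of the old
rings moves outside the netif_running() branch, so a configuration change
on a downed interface no longer leaks them, and the error path only calls
e1000_up() when the interface was actually running. The overall shape, as
a generic sketch with invented names:

#include <stdlib.h>

struct ring { int count; };
struct adapter { struct ring *tx, *rx; int running; };

static void dev_stop(struct adapter *ad) { (void)ad; }
static int dev_start(struct adapter *ad) { (void)ad; return 0; }

static int resize_rings(struct adapter *ad, struct ring *new_tx,
                        struct ring *new_rx)
{
    struct ring *old_tx = ad->tx, *old_rx = ad->rx;
    int err;

    if (ad->running)
        dev_stop(ad);		/* quiesce traffic on the old rings */

    ad->tx = new_tx;
    ad->rx = new_rx;

    if (ad->running) {
        err = dev_start(ad);	/* bring traffic up on the new rings */
        if (err)
            return err;		/* caller unwinds the new rings */
    }

    /* Freed on BOTH the running and the down path -- the leak fix. */
    free(old_tx);
    free(old_rx);
    return 0;
}

int main(void)
{
    struct adapter ad = { calloc(1, sizeof(struct ring)),
                          calloc(1, sizeof(struct ring)), 0 };

    return resize_rings(&ad, calloc(1, sizeof(struct ring)),
                        calloc(1, sizeof(struct ring)));
}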
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
 		i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
 
-	WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+	WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
 		  "stat strings count mismatch!");
 }
......
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 				       u8 *bw_share)
 {
 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+	struct i40e_pf *pf = vsi->back;
 	i40e_status ret;
 	int i;
 
-	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+	/* There is no need to reset BW when mqprio mode is on. */
+	if (pf->flags & I40E_FLAG_TC_MQPRIO)
 		return 0;
-	if (!vsi->mqprio_qopt.qopt.hw) {
+	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
 		if (ret)
-			dev_info(&vsi->back->pdev->dev,
+			dev_info(&pf->pdev->dev,
 				 "Failed to reset tx rate for vsi->seid %u\n",
 				 vsi->seid);
 		return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		bw_data.tc_bw_credits[i] = bw_share[i];
 
-	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-				       NULL);
+	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
 	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
+		dev_info(&pf->pdev->dev,
 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 pf->hw.aq.asq_last_status);
 		return -EINVAL;
 	}
......
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
 	u16 qoffset;
 	u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
 	struct list_head tmp_sync_list;		/* MAC filters to be synced */
 	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */
 
-	bool irqs_ready;
-	bool current_isup;		/* Sync 'link up' logging */
-	bool stat_offsets_loaded;
+	u8 irqs_ready;
+	u8 current_isup;		/* Sync 'link up' logging */
+	u8 stat_offsets_loaded;
 
 	/* queue information */
 	u8 tx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
 	struct ice_hw_port_stats stats;
 	struct ice_hw_port_stats stats_prev;
 	struct ice_hw hw;
-	bool stat_prev_loaded;	/* has previous stats been loaded */
+	u8 stat_prev_loaded;	/* has previous stats been loaded */
 	char int_name[ICE_INT_NAME_STR_LEN];
 };
......
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
 	/* VLAN section */
 	__le16 pvid; /* VLANS include priority bits */
 	u8 pvlan_reserved[2];
-	u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S		0
-#define ICE_AQ_VSI_PVLAN_MODE_M		(0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED	0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED	0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL	0x3
+	u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S		0
+#define ICE_AQ_VSI_VLAN_MODE_M		(0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED	0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED	0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL	0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID	BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S		3
-#define ICE_AQ_VSI_PVLAN_EMOD_M		(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR	(0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S		3
+#define ICE_AQ_VSI_VLAN_EMOD_M		(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH	(0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP	(0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR	(0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
 	u8 pvlan_reserved2[3];
 	/* ingress egress up sections */
 	__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M	(0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S	22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M	(0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX	7
 
 /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT		0x7
......
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
 	struct ice_phy_info *phy_info;
 	enum ice_status status = 0;
 
-	if (!pi)
+	if (!pi || !link_up)
 		return ICE_ERR_PARAM;
 
 	phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
 	}
 
 	/* LUT size is only valid for Global and PF table types */
-	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+	switch (lut_size) {
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else {
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+			break;
+		}
+		/* fall-through */
+	default:
 		status = ICE_ERR_PARAM;
 		goto ice_aq_get_set_rss_lut_exit;
 	}
......
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
 	return 0;
 
 init_ctrlq_free_rq:
-	ice_shutdown_rq(hw, cq);
-	ice_shutdown_sq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
 	return status;
 }
 
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		return;
 	}
 
-	ice_shutdown_sq(hw, cq);
-	ice_shutdown_rq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
 	/* Set pending if needed, unlock and return */
-	if (pending)
+	if (pending) {
+		/* re-read HW head to calculate actual pending messages */
+		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+	}
 clean_rq_elem_err:
 	mutex_unlock(&cq->rq_lock);
......
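The pending computation in the last hunk is circular-ring arithmetic: ntc
("next to clean") is the first descriptor the driver has not yet consumed,
ntu ("next to use") is the head the hardware just reported, and when the
head has wrapped past the end of the ring the ring size is added back so
the difference stays non-negative. A worked example for an assumed
16-entry ring:

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 16u

/* Descriptors produced by the hardware but not yet consumed. */
static uint16_t pending(uint16_t ntc, uint16_t ntu)
{
    return (uint16_t)((ntc > ntu ? RING_COUNT : 0) + (ntu - ntc));
}

int main(void)
{
    /* No wrap: clean pointer at 5, head at 9 -> 4 pending. */
    printf("%u\n", (unsigned)pending(5, 9));
    /* Wrapped: clean pointer at 14, head back at 2 -> 16 + (2 - 14) = 4. */
    printf("%u\n", (unsigned)pending(14, 2));
    return 0;
}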
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 
-	return ((np->vsi->num_txq + np->vsi->num_rxq) *
+	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
 		(sizeof(struct ice_q_stats) / sizeof(u64)));
 }
 
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 		p += ETH_GSTRING_LEN;
 	}
 
-	ice_for_each_txq(vsi, i) {
+	ice_for_each_alloc_txq(vsi, i) {
 		snprintf(p, ETH_GSTRING_LEN,
 			 "tx-queue-%u.tx_packets", i);
 		p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 		p += ETH_GSTRING_LEN;
 	}
 
-	ice_for_each_rxq(vsi, i) {
+	ice_for_each_alloc_rxq(vsi, i) {
 		snprintf(p, ETH_GSTRING_LEN,
 			 "rx-queue-%u.rx_packets", i);
 		p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
+		/* The number (and order) of strings reported *must* remain
+		 * constant for a given netdevice. This function must not
+		 * report a different number based on run time parameters
+		 * (such as the number of queues in use, or the setting of
+		 * a private ethtool flag). This is due to the nature of the
+		 * ethtool stats API.
+		 *
+		 * User space programs such as ethtool must make 3 separate
+		 * ioctl requests, one for size, one for the strings, and
+		 * finally one for the stats. Since these cross into
+		 * user space, changes to the number or size could result in
+		 * undefined memory access or incorrect string<->value
+		 * correlations for statistics.
+		 *
+		 * Even if it appears to be safe, changes to the size or
+		 * order of strings will suffer from race conditions and are
+		 * not safe.
+		 */
 		return ICE_ALL_STATS_LEN(netdev);
 	default:
 		return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	/* populate per queue stats */
 	rcu_read_lock();
 
-	ice_for_each_txq(vsi, j) {
+	ice_for_each_alloc_txq(vsi, j) {
 		ring = READ_ONCE(vsi->tx_rings[j]);
-		if (!ring)
-			continue;
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
-	ice_for_each_rxq(vsi, j) {
+	ice_for_each_alloc_rxq(vsi, j) {
 		ring = READ_ONCE(vsi->rx_rings[j]);
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
 	rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_txq; i++) {
+	for (i = 0; i < vsi->alloc_txq; i++) {
 		/* clone ring and setup updated count */
 		tx_rings[i] = *vsi->tx_rings[i];
 		tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_rxq; i++) {
+	for (i = 0; i < vsi->alloc_rxq; i++) {
 		/* clone ring and setup updated count */
 		rx_rings[i] = *vsi->rx_rings[i];
 		rx_rings[i].count = new_rx_cnt;
......
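The long comment added to ice_get_sset_count() above describes the ethtool
contract this series repairs: user space fetches the stat count, then the
strings, then the values, in three separate ioctl() calls, so the count
must stay constant between calls. That is why the driver now reports all
allocated queues and pads unused ones with zeros. A minimal sketch of the
userspace sequence (error handling trimmed; "eth0" is just an example
interface name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

    /* ioctl 1: how many ETH_SS_STATS strings does the driver report? */
    struct {
        struct ethtool_sset_info hdr;
        __u32 buf[1];
    } sset_info;

    memset(&sset_info, 0, sizeof(sset_info));
    sset_info.hdr.cmd = ETHTOOL_GSSET_INFO;
    sset_info.hdr.sset_mask = 1ULL << ETH_SS_STATS;
    ifr.ifr_data = (void *)&sset_info;
    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
        return 1;
    __u32 n = sset_info.hdr.data[0];

    /* ioctl 2: fetch the n stat-name strings. */
    struct ethtool_gstrings *strings =
        calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
    strings->cmd = ETHTOOL_GSTRINGS;
    strings->string_set = ETH_SS_STATS;
    strings->len = n;
    ifr.ifr_data = (void *)strings;
    ioctl(fd, SIOCETHTOOL, &ifr);

    /* ioctl 3: fetch the n 64-bit counter values. */
    struct ethtool_stats *stats =
        calloc(1, sizeof(*stats) + n * sizeof(__u64));
    stats->cmd = ETHTOOL_GSTATS;
    stats->n_stats = n;
    ifr.ifr_data = (void *)stats;
    ioctl(fd, SIOCETHTOOL, &ifr);

    /* The string<->value pairing is purely positional; if the count
     * changed between these calls the correlation would silently break.
     */
    for (__u32 i = 0; i < n; i++)
        printf("%.32s: %llu\n",
               (char *)strings->data + i * ETH_GSTRING_LEN,
               (unsigned long long)stats->data[i]);
    return 0;
}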
@@ -121,10 +121,6 @@
 #define PFINT_FW_CTL_CAUSE_ENA_S	30
 #define PFINT_FW_CTL_CAUSE_ENA_M	BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR			0x0016CA00
-#define PFINT_OICR_HLP_RDY_S		14
-#define PFINT_OICR_HLP_RDY_M		BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S		15
-#define PFINT_OICR_CPM_RDY_M		BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S		16
 #define PFINT_OICR_ECC_ERR_M		BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S		19
@@ -133,10 +129,6 @@
 #define PFINT_OICR_GRST_M		BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S	21
 #define PFINT_OICR_PCI_EXCEPTION_M	BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S		22
-#define PFINT_OICR_GPIO_M		BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S	24
-#define PFINT_OICR_STORM_DETECT_M	BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S		26
 #define PFINT_OICR_HMC_ERR_M		BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S		28
......
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
 	u16 head;
 	u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
 	u64 base;
 	u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7
......
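One detail worth noting in the last hunk: ICE_RLAN_BASE_S is a shift
width, suggesting the RX ring base address is programmed in 128-byte
(1 << 7) units, in the same style as the neighbouring ICE_RLAN_CTX_DBUF_S
define. A sketch of how such a field is typically derived (assumed usage,
not quoted from the driver):

#include <assert.h>
#include <stdint.h>

#define ICE_RLAN_BASE_S 7	/* ring base stored in 128-byte units */

int main(void)
{
    uint64_t dma = 0x2000;	/* example DMA address, 128-byte aligned */

    /* The low ICE_RLAN_BASE_S bits must be zero before shifting. */
    assert((dma & ((1u << ICE_RLAN_BASE_S) - 1)) == 0);

    uint64_t base = dma >> ICE_RLAN_BASE_S;	/* value for the context field */

    return base == 0x40 ? 0 : 1;
}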