Commit 050cdc6c authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) ICE, E1000, IGB, IXGBE, and I40E bug fixes from the Intel folks.

 2) Better fix for AB-BA deadlock in packet scheduler code, from Cong
    Wang.

 3) bpf sockmap fixes (zero sized key handling, etc.) from Daniel
    Borkmann.

 4) Send zero IPID in TCP resets and SYN-RECV state ACKs, to prevent
    attackers using it as a side-channel. From Eric Dumazet.

 5) Memory leak in mediatek bluetooth driver, from Gustavo A. R. Silva.

 6) Hook up rt->dst.input of ipv6 anycast routes properly, from Hangbin
    Liu.

 7) hns and hns3 bug fixes from Huazhong Tan.

 8) Fix RIF leak in mlxsw driver, from Ido Schimmel.

 9) iova range check fix in vhost, from Jason Wang.

10) Fix hang in do_tcp_sendpages() with tls, from John Fastabend.

11) More r8152 chips need to disable RX aggregation, from Kai-Heng Feng.

12) Memory exposure in TCA_U32_SEL handling, from Kees Cook.

13) TCP BBR congestion control fixes from Kevin Yang.

14) hv_netvsc, ignore non-PCI devices, from Stephen Hemminger.

15) qed driver fixes from Tomer Tayar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (77 commits)
  net: sched: Fix memory exposure from short TCA_U32_SEL
  qed: fix spelling mistake "comparsion" -> "comparison"
  vhost: correctly check the iova range when waking virtqueue
  qlge: Fix netdev features configuration.
  net: macb: do not disable MDIO bus at open/close time
  Revert "net: stmmac: fix build failure due to missing COMMON_CLK dependency"
  net: macb: Fix regression breaking non-MDIO fixed-link PHYs
  mlxsw: spectrum_switchdev: Do not leak RIFs when removing bridge
  i40e: fix condition of WARN_ONCE for stat strings
  i40e: Fix for Tx timeouts when interface is brought up if DCB is enabled
  ixgbe: fix driver behaviour after issuing VFLR
  ixgbe: Prevent unsupported configurations with XDP
  ixgbe: Replace GFP_ATOMIC with GFP_KERNEL
  igb: Replace mdelay() with msleep() in igb_integrated_phy_loopback()
  igb: Replace GFP_ATOMIC with GFP_KERNEL in igb_sw_init()
  igb: Use an advanced ctx descriptor for launchtime
  e1000: ensure to free old tx/rx rings in set_ringparam()
  e1000: check on netif_running() before calling e1000_up()
  ixgb: use dma_zalloc_coherent instead of allocator/memset
  ice: Trivial formatting fixes
  ...
parents 908946c4 98c8f125
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_police_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, police_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 MODULE_AUTHOR("Alexey Kuznetsov");
 MODULE_DESCRIPTION("Policing actions");
 MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
 	.init = tcf_police_init,
 	.walk = tcf_police_walker,
 	.lookup = tcf_police_search,
-	.delete = tcf_police_delete,
 	.size = sizeof(struct tcf_police),
 };
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_sample_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_sample_ops = {
 	.kind = "sample",
 	.type = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
 	.cleanup = tcf_sample_cleanup,
 	.walk = tcf_sample_walker,
 	.lookup = tcf_sample_search,
-	.delete = tcf_sample_delete,
 	.size = sizeof(struct tcf_sample),
 };
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_simp_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_simp_ops = {
 	.kind = "simple",
 	.type = TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
 	.init = tcf_simp_init,
 	.walk = tcf_simp_walker,
 	.lookup = tcf_simp_search,
-	.delete = tcf_simp_delete,
 	.size = sizeof(struct tcf_defact),
 };
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbedit_ops = {
 	.kind = "skbedit",
 	.type = TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
 	.cleanup = tcf_skbedit_cleanup,
 	.walk = tcf_skbedit_walker,
 	.lookup = tcf_skbedit_search,
-	.delete = tcf_skbedit_delete,
 	.size = sizeof(struct tcf_skbedit),
 };
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbmod_ops = {
 	.kind = "skbmod",
 	.type = TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
 	.cleanup = tcf_skbmod_cleanup,
 	.walk = tcf_skbmod_walker,
 	.lookup = tcf_skbmod_search,
-	.delete = tcf_skbmod_delete,
 	.size = sizeof(struct tcf_skbmod),
 };
@@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tunnel_key_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_tunnel_key_ops = {
 	.kind = "tunnel_key",
 	.type = TCA_ACT_TUNNEL_KEY,
@@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
 	.cleanup = tunnel_key_release,
 	.walk = tunnel_key_walker,
 	.lookup = tunnel_key_search,
-	.delete = tunnel_key_delete,
 	.size = sizeof(struct tcf_tunnel_key),
 };
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_vlan_delete(struct net *net, u32 index)
-{
-	struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
-	return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_vlan_ops = {
 	.kind = "vlan",
 	.type = TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
 	.cleanup = tcf_vlan_cleanup,
 	.walk = tcf_vlan_walker,
 	.lookup = tcf_vlan_search,
-	.delete = tcf_vlan_delete,
 	.size = sizeof(struct tcf_vlan),
 };
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_U32_MAX + 1];
 	u32 htid, flags = 0;
+	size_t sel_size;
 	int err;
 #ifdef CONFIG_CLS_U32_PERF
 	size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	}
 
 	s = nla_data(tb[TCA_U32_SEL]);
+	sel_size = struct_size(s, keys, s->nkeys);
+	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
+		err = -EINVAL;
+		goto erridr;
+	}
 
-	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
 	if (n == NULL) {
 		err = -ENOBUFS;
 		goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	}
 #endif
 
-	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+	memcpy(&n->sel, s, sel_size);
 	RCU_INIT_POINTER(n->ht_up, ht);
 	n->handle = handle;
 	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
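The cls_u32 change above follows a general pattern for netlink attributes that embed a flexible-array struct: derive the real size from the element count the attribute claims, reject the attribute if it is shorter than that, and reuse the validated size for both the allocation and the copy. As the commit title says, a short TCA_U32_SEL previously let the copy run past the attribute and expose adjacent kernel memory. Below is a minimal user-space sketch of the pattern; the struct and function names are hypothetical, and the kernel's struct_size() helper additionally saturates on integer overflow, which this simplified check does not attempt.

	/* User-space illustration only; struct and function names are hypothetical. */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct key { uint32_t mask, val; };

	struct sel {
		uint8_t nkeys;
		struct key keys[];              /* length promised by nkeys */
	};

	/* Copy an untrusted selector only if its declared key count fits in len. */
	static struct sel *copy_sel(const void *attr, size_t len)
	{
		const struct sel *s = attr;
		size_t sel_size;
		struct sel *n;

		if (len < sizeof(*s))
			return NULL;
		sel_size = sizeof(*s) + (size_t)s->nkeys * sizeof(struct key);
		if (len < sel_size)             /* short attribute: copying would over-read */
			return NULL;

		n = malloc(sel_size);
		if (n)
			memcpy(n, s, sel_size);     /* bounded by the validated size */
		return n;
	}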
@@ -64,7 +64,6 @@
 #include <linux/vmalloc.h>
 #include <linux/reciprocal_div.h>
 #include <net/netlink.h>
-#include <linux/version.h>
 #include <linux/if_vlan.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
 }
 
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-		     int flow_mode)
+		     int flow_mode, u16 flow_override, u16 host_override)
 {
-	u32 flow_hash = 0, srchost_hash, dsthost_hash;
+	u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
 	u16 reduced_hash, srchost_idx, dsthost_idx;
 	struct flow_keys keys, host_keys;
 
 	if (unlikely(flow_mode == CAKE_FLOW_NONE))
		return 0;
 
+	/* If both overrides are set we can skip packet dissection entirely */
+	if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
+	    (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+		goto skip_hash;
+
 	skb_flow_dissect_flow_keys(skb, &keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 	if (flow_mode & CAKE_FLOW_FLOWS)
		flow_hash = flow_hash_from_keys(&keys);
 
+skip_hash:
+	if (flow_override)
+		flow_hash = flow_override - 1;
+	if (host_override) {
+		dsthost_hash = host_override - 1;
+		srchost_hash = host_override - 1;
+	}
+
 	if (!(flow_mode & CAKE_FLOW_FLOWS)) {
		if (flow_mode & CAKE_FLOW_SRC_IP)
			flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct tcf_proto *filter;
 	struct tcf_result res;
-	u32 flow = 0;
+	u16 flow = 0, host = 0;
 	int result;
 
 	filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
 #endif
		if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
			flow = TC_H_MIN(res.classid);
+		if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
+			host = TC_H_MAJ(res.classid) >> 16;
 	}
 hash:
 	*t = cake_select_tin(sch, skb);
-	return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+	return cake_hash(*t, skb, flow_mode, flow, host) + 1;
 }
 
 static void cake_reconfigure(struct Qdisc *sch);
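The sch_cake hunks split the classifier result into two overrides: the minor part of the returned classid pins the flow queue and the major part pins the host bucket, with zero meaning "no override" and the stored value offset by one so that index 0 remains selectable; when both overrides are present, cake_hash() can skip packet dissection entirely via the added skip_hash path. A minimal sketch of that mapping, using hypothetical names and a queue count chosen only for illustration:

	/* Illustration only: names and the queue count here are assumptions, not the patch. */
	#include <stdint.h>

	#define QUEUES_ILLUSTRATIVE 1024        /* placeholder for the qdisc's queue count */

	struct overrides {
		uint16_t flow;  /* 1..N selects queue flow-1; 0 = hash the packet */
		uint16_t host;  /* 1..N selects host bucket host-1; 0 = hash the packet */
	};

	/* Map a tc classid (major:minor handle) to the two overrides. */
	static struct overrides classid_to_overrides(uint32_t classid)
	{
		struct overrides o = { 0, 0 };
		uint32_t minor = classid & 0x0000ffffU;
		uint32_t major = classid >> 16;

		if (minor <= QUEUES_ILLUSTRATIVE)
			o.flow = (uint16_t)minor;
		if (major <= QUEUES_ILLUSTRATIVE)
			o.host = (uint16_t)major;
		return o;
	}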
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
 
-	/* We are already sending pages, ignore notification */
-	if (ctx->in_tcp_sendpages)
+	/* If in_tcp_sendpages, call the lower protocol write space handler
+	 * to ensure we wake up any waiting operations there. For example
+	 * if do_tcp_sendpages were to call sk_wait_event.
+	 */
+	if (ctx->in_tcp_sendpages) {
+		ctx->sk_write_space(sk);
		return;
+	}
 
 	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;
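The tls_write_space() fix above is a callback-chaining issue: TLS replaces the socket's write-space hook but keeps the transport's original one, and while it is inside the transport's sendpage path it must forward the notification rather than swallow it, otherwise a sender sleeping in the transport (the do_tcp_sendpages() hang mentioned in the pull request) is never woken. Below is a self-contained sketch of that pattern with hypothetical names; it is not the kernel API.

	/* Sketch of callback chaining; all names are hypothetical. */
	#include <stdbool.h>
	#include <stdio.h>

	struct conn {
		void (*write_space)(struct conn *c);        /* current notification hook */
	};

	struct upper_ctx {
		bool in_lower_send;                         /* busy inside the transport's send path */
		void (*saved_write_space)(struct conn *c);  /* transport's original hook */
	};

	static struct upper_ctx g_ctx;                      /* one connection, for brevity */

	static void upper_write_space(struct conn *c)
	{
		if (g_ctx.in_lower_send) {
			/* Forward instead of returning early, mirroring the fix. */
			g_ctx.saved_write_space(c);
			return;
		}
		/* ...push any pending upper-layer record here, then chain down... */
		g_ctx.saved_write_space(c);
	}

	static void transport_write_space(struct conn *c)
	{
		(void)c;
		printf("transport waiters woken\n");
	}

	int main(void)
	{
		struct conn c = { .write_space = transport_write_space };

		/* Install the upper layer's hook, keeping the original for chaining. */
		g_ctx.saved_write_space = c.write_space;
		c.write_space = upper_write_space;

		g_ctx.in_lower_send = true;                 /* as if inside the transport's sendpage loop */
		c.write_space(&c);                          /* notification must still reach the transport */
		return 0;
	}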
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
		return 0;
 
 	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
-		return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
 
 	bpf.command = XDP_QUERY_XSK_UMEM;
 
 	rtnl_lock();
 	err = xdp_umem_query(dev, queue_id);
 	if (err) {
-		err = err < 0 ? -ENOTSUPP : -EBUSY;
+		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
		goto err_rtnl_unlock;
 	}
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
 	}
 
 	while (argc) {
-		if (argc < 2)
+		if (argc < 2) {
			BAD_ARG();
+			goto err_close_map;
+		}
 
		if (is_prefix(*argv, "cpu")) {
			char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
			NEXT_ARG();
		} else {
			BAD_ARG();
+			goto err_close_map;
		}
 
		do_all = false;