Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c            |  18
-rw-r--r--   net/core/drop_monitor.c   |   2
-rw-r--r--   net/core/dst.c            |  17
-rw-r--r--   net/core/filter.c         |  54
-rw-r--r--   net/core/gro.c            |   9
-rw-r--r--   net/core/net-traces.c     |   2
-rw-r--r--   net/core/net_namespace.c  |  10
-rw-r--r--   net/core/netpoll.c        |   2
-rw-r--r--   net/core/pktgen.c         |   2
-rw-r--r--   net/core/rtnetlink.c      |  33
-rw-r--r--   net/core/sock.c           |  44
-rw-r--r--   net/core/sock_map.c       |  12
-rw-r--r--   net/core/tso.c            |   2
13 files changed, 126 insertions(+), 81 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index cd479f5f22f6..8453e14d301b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3512,7 +3512,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
 		return features & ~NETIF_F_GSO_MASK;
 
-	if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
+	if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
 		return features & ~NETIF_F_GSO_MASK;
 
 	if (!skb_shinfo(skb)->gso_type) {
@@ -3639,6 +3639,9 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 		return 0;
 
 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
+		    skb_network_header_len(skb) != sizeof(struct ipv6hdr))
+			goto sw_checksum;
 		switch (skb->csum_offset) {
 		case offsetof(struct tcphdr, check):
 		case offsetof(struct udphdr, check):
@@ -3646,6 +3649,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 		}
 	}
 
+sw_checksum:
 	return skb_checksum_help(skb);
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
@@ -3758,7 +3762,7 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
 						  sizeof(_tcphdr), &_tcphdr);
 			if (likely(th))
 				hdr_len += __tcp_hdrlen(th);
-		} else {
+		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
 			struct udphdr _udphdr;
 
 			if (skb_header_pointer(skb, hdr_len,
@@ -3766,10 +3770,14 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
 				hdr_len += sizeof(struct udphdr);
 		}
 
-		if (shinfo->gso_type & SKB_GSO_DODGY)
-			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-						shinfo->gso_size);
+		if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
+			int payload = skb->len - hdr_len;
+
+			/* Malicious packet. */
+			if (payload <= 0)
+				return;
+
+			gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
+		}
 
 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
 	}
 }
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 2e0ae3328232..6efd4cccc9dd 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -37,7 +37,7 @@
 #include <trace/events/napi.h>
 #include <trace/events/devlink.h>
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 #define TRACE_ON 1
 #define TRACE_OFF 0
diff --git a/net/core/dst.c b/net/core/dst.c
index 95f533844f17..9552a90d4772 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -109,9 +109,6 @@ static void dst_destroy(struct dst_entry *dst)
 		child = xdst->child;
 	}
 #endif
-	if (!(dst->flags & DST_NOCOUNT))
-		dst_entries_add(dst->ops, -1);
-
 	if (dst->ops->destroy)
 		dst->ops->destroy(dst);
 	netdev_put(dst->dev, &dst->dev_tracker);
@@ -159,17 +156,27 @@ void dst_dev_put(struct dst_entry *dst)
 }
 EXPORT_SYMBOL(dst_dev_put);
 
+static void dst_count_dec(struct dst_entry *dst)
+{
+	if (!(dst->flags & DST_NOCOUNT))
+		dst_entries_add(dst->ops, -1);
+}
+
 void dst_release(struct dst_entry *dst)
 {
-	if (dst && rcuref_put(&dst->__rcuref))
+	if (dst && rcuref_put(&dst->__rcuref)) {
+		dst_count_dec(dst);
 		call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
+	}
 }
 EXPORT_SYMBOL(dst_release);
 
 void dst_release_immediate(struct dst_entry *dst)
 {
-	if (dst && rcuref_put(&dst->__rcuref))
+	if (dst && rcuref_put(&dst->__rcuref)) {
+		dst_count_dec(dst);
 		dst_destroy(dst);
+	}
 }
 EXPORT_SYMBOL(dst_release_immediate);
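The dst.c hunks above move the DST_NOCOUNT accounting out of dst_destroy() and into the two release paths, so the per-ops entry counter drops as soon as the last reference goes away instead of waiting for the deferred RCU destruction. A minimal userspace sketch of that pattern, using hypothetical my_obj/entries names rather than the kernel's types:

#include <stdatomic.h>
#include <stdbool.h>

struct my_obj {
	atomic_int refcnt;	/* analogous to dst->__rcuref */
	bool nocount;		/* analogous to DST_NOCOUNT */
};

static atomic_int entries;	/* analogous to the dst_entries_add() counter */

/* Decrement the global counter the moment the last reference is dropped,
 * not when the (possibly deferred) destructor finally runs. */
static void my_obj_put(struct my_obj *obj)
{
	if (atomic_fetch_sub(&obj->refcnt, 1) == 1) {
		if (!obj->nocount)
			atomic_fetch_sub(&entries, 1);
		/* actual destruction may be deferred, e.g. via RCU */
	}
}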
diff --git a/net/core/filter.c b/net/core/filter.c
index cd3524cb326b..fb56567c551e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -42,7 +42,7 @@
 #include <linux/errno.h>
 #include <linux/timer.h>
 #include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/filter.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>
@@ -2249,7 +2249,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
 		rcu_read_unlock();
 		return ret;
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 	if (dst)
 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 out_drop:
@@ -2438,9 +2438,9 @@ out:
 
 /* Internal, non-exposed redirect flags. */
 enum {
-	BPF_F_NEIGH	= (1ULL << 1),
-	BPF_F_PEER	= (1ULL << 2),
-	BPF_F_NEXTHOP	= (1ULL << 3),
+	BPF_F_NEIGH	= (1ULL << 16),
+	BPF_F_PEER	= (1ULL << 17),
+	BPF_F_NEXTHOP	= (1ULL << 18),
 #define BPF_F_REDIRECT_INTERNAL	(BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
 };
 
@@ -2450,6 +2450,8 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 	struct sk_buff *clone;
 	int ret;
 
+	BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS);
+
 	if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
 		return -EINVAL;
 
@@ -6279,24 +6281,16 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
 {
 	int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
 	struct net_device *dev = skb->dev;
-	int skb_len, dev_len;
-	int mtu = 0;
-
-	if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
-		ret = -EINVAL;
-		goto out;
-	}
+	int mtu, dev_len, skb_len;
 
-	if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
+		return -EINVAL;
+	if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
+		return -EINVAL;
 
 	dev = __dev_via_ifindex(dev, ifindex);
-	if (unlikely(!dev)) {
-		ret = -ENODEV;
-		goto out;
-	}
+	if (unlikely(!dev))
+		return -ENODEV;
 
 	mtu = READ_ONCE(dev->mtu);
 	dev_len = mtu + dev->hard_header_len;
@@ -6331,19 +6325,15 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
 	struct net_device *dev = xdp->rxq->dev;
 	int xdp_len = xdp->data_end - xdp->data;
 	int ret = BPF_MTU_CHK_RET_SUCCESS;
-	int mtu = 0, dev_len;
+	int mtu, dev_len;
 
 	/* XDP variant doesn't support multi-buffer segment check (yet) */
-	if (unlikely(flags)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (unlikely(flags))
+		return -EINVAL;
 
 	dev = __dev_via_ifindex(dev, ifindex);
-	if (unlikely(!dev)) {
-		ret = -ENODEV;
-		goto out;
-	}
+	if (unlikely(!dev))
+		return -ENODEV;
 
 	mtu = READ_ONCE(dev->mtu);
 	dev_len = mtu + dev->hard_header_len;
@@ -6355,7 +6345,7 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
 	xdp_len += len_diff; /* minus result pass check */
 	if (xdp_len > dev_len)
 		ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
-out:
+
 	*mtu_len = mtu;
 	return ret;
 }
@@ -6366,7 +6356,7 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_ANYTHING,
-	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
 	.arg3_size	= sizeof(u32),
 	.arg4_type	= ARG_ANYTHING,
 	.arg5_type	= ARG_ANYTHING,
@@ -6378,7 +6368,7 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_ANYTHING,
-	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
 	.arg3_size	= sizeof(u32),
 	.arg4_type	= ARG_ANYTHING,
 	.arg5_type	= ARG_ANYTHING,
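Moving BPF_F_NEIGH/BPF_F_PEER/BPF_F_NEXTHOP from the low bits up to bits 16-18 keeps the kernel-internal redirect flags out of the range reserved for UAPI flags such as BPF_F_INGRESS, and the new BUILD_BUG_ON turns any future overlap into a build failure. A standalone C11 sketch of the same compile-time guard, with the MY_* names as made-up stand-ins for the kernel's masks:

/* Compile-time assertion in plain C11; the kernel's BUILD_BUG_ON is the
 * same idea expressed with macro machinery. */
#define MY_UAPI_MASK	0x0000ffffu	/* stand-in for BPF_F_REDIRECT_FLAGS */
#define MY_NEIGH	(1u << 16)
#define MY_PEER		(1u << 17)
#define MY_NEXTHOP	(1u << 18)
#define MY_INTERNAL	(MY_NEIGH | MY_PEER | MY_NEXTHOP)

_Static_assert((MY_INTERNAL & MY_UAPI_MASK) == 0,
	       "internal flags must not overlap UAPI flags");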
diff --git a/net/core/gro.c b/net/core/gro.c
index 802b4a062400..d1f44084e978 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -98,7 +98,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int headlen = skb_headlen(skb);
 	unsigned int len = skb_gro_len(skb);
 	unsigned int delta_truesize;
-	unsigned int gro_max_size;
 	unsigned int new_truesize;
 	struct sk_buff *lp;
 	int segs;
@@ -112,12 +111,8 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	if (p->pp_recycle != skb->pp_recycle)
 		return -ETOOMANYREFS;
 
-	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
-	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
-			READ_ONCE(p->dev->gro_max_size) :
-			READ_ONCE(p->dev->gro_ipv4_max_size);
-
-	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
+	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
+		     NAPI_GRO_CB(skb)->flush))
 		return -E2BIG;
 
 	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 6aef976bc1da..f2fa34b1d78d 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -23,7 +23,7 @@
 #include <linux/net_dropmon.h>
 #include <linux/slab.h>
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <asm/bitops.h>
 
 #define CREATE_TRACE_POINTS
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e39479f1c9a4..b231b27d8268 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -694,20 +694,18 @@ EXPORT_SYMBOL_GPL(get_net_ns);
 
 struct net *get_net_ns_by_fd(int fd)
 {
-	struct fd f = fdget(fd);
-	struct net *net = ERR_PTR(-EINVAL);
+	CLASS(fd, f)(fd);
 
-	if (!fd_file(f))
+	if (fd_empty(f))
 		return ERR_PTR(-EBADF);
 
 	if (proc_ns_file(fd_file(f))) {
 		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));
 
 		if (ns->ops == &netns_operations)
-			net = get_net(container_of(ns, struct net, ns));
+			return get_net(container_of(ns, struct net, ns));
 	}
-	fdput(f);
 
-	return net;
+	return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
 #endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ca52cbe0f63c..aa49b92e9194 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -34,7 +34,7 @@
 #include <net/addrconf.h>
 #include <net/ndisc.h>
 #include <net/ip6_checksum.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <trace/events/napi.h>
 #include <linux/kconfig.h>
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 34f68ef74b8f..7e23cacbe66e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2285,7 +2285,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	s64 remaining;
 	struct hrtimer_sleeper t;
 
-	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	hrtimer_set_expires(&t.timer, spin_until);
 
 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
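The get_net_ns_by_fd() rewrite above relies on CLASS(fd, f)(fd), the kernel's scope-based cleanup helper, so fdput() runs automatically on every return path and the early returns become safe. A rough userspace analogue built on the compiler's cleanup attribute, with hypothetical close_file/read_first_byte names (the real macro machinery lives in linux/cleanup.h):

#include <stdio.h>

static void close_file(FILE **fp)
{
	if (*fp)
		fclose(*fp);	/* runs automatically on every return path */
}

static int read_first_byte(const char *path)
{
	FILE *f __attribute__((cleanup(close_file))) = fopen(path, "r");

	if (!f)
		return -1;	/* no explicit fclose() needed */
	return fgetc(f);
}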
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f0a520987085..2ba5cd965d3f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -384,6 +384,35 @@ void rtnl_unregister_all(int protocol)
 }
 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
+int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
+{
+	const struct rtnl_msg_handler *handler;
+	int i, err;
+
+	for (i = 0, handler = handlers; i < n; i++, handler++) {
+		err = rtnl_register_internal(handler->owner, handler->protocol,
+					     handler->msgtype, handler->doit,
+					     handler->dumpit, handler->flags);
+		if (err) {
+			__rtnl_unregister_many(handlers, i);
+			break;
+		}
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(__rtnl_register_many);
+
+void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
+{
+	const struct rtnl_msg_handler *handler;
+	int i;
+
+	for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
+		rtnl_unregister(handler->protocol, handler->msgtype);
+}
+EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
+
 static LIST_HEAD(link_ops);
 
 static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
@@ -2003,7 +2032,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
 	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
 	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
-	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
+	[IFLA_GSO_MAX_SIZE]	= NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
 	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
@@ -2028,7 +2057,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
 	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
 	[IFLA_ALLMULTI]		= { .type = NLA_REJECT },
-	[IFLA_GSO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
+	[IFLA_GSO_IPV4_MAX_SIZE]	= NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
 	[IFLA_GRO_IPV4_MAX_SIZE]	= { .type = NLA_U32 },
 };
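__rtnl_register_many() registers a whole table of rtnetlink handlers and, on partial failure, unwinds the entries already registered; __rtnl_unregister_many() tears a table down in reverse order. A hedged sketch of how a caller might use the pair, with my_newlink/my_dellink as hypothetical doit handlers (field names follow struct rtnl_msg_handler exactly as used in the diff above):

/* Hypothetical module init/exit; only the __rtnl_*_many() calls and the
 * struct fields come from the diff, everything else is illustrative. */
static int my_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	return 0;	/* stub handler */
}

static int my_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	return 0;	/* stub handler */
}

static const struct rtnl_msg_handler my_handlers[] = {
	{ .owner = THIS_MODULE, .protocol = PF_UNSPEC,
	  .msgtype = RTM_NEWLINK, .doit = my_newlink },
	{ .owner = THIS_MODULE, .protocol = PF_UNSPEC,
	  .msgtype = RTM_DELLINK, .doit = my_dellink },
};

static int __init my_init(void)
{
	/* On failure, entries registered so far are already unwound. */
	return __rtnl_register_many(my_handlers, ARRAY_SIZE(my_handlers));
}

static void __exit my_exit(void)
{
	__rtnl_unregister_many(my_handlers, ARRAY_SIZE(my_handlers));
}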
diff --git a/net/core/sock.c b/net/core/sock.c
index fe87f9bd8f16..da50df485090 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -85,7 +85,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/errqueue.h>
@@ -1052,32 +1052,34 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
 
 #ifdef CONFIG_PAGE_POOL
 
-/* This is the number of tokens that the user can SO_DEVMEM_DONTNEED in
- * 1 syscall. The limit exists to limit the amount of memory the kernel
- * allocates to copy these tokens.
+/* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED
+ * in 1 syscall. The limit exists to limit the amount of memory the kernel
+ * allocates to copy these tokens, and to prevent looping over the frags for
+ * too long.
  */
 #define MAX_DONTNEED_TOKENS 128
+#define MAX_DONTNEED_FRAGS 1024
 
 static noinline_for_stack int
 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
 {
 	unsigned int num_tokens, i, j, k, netmem_num = 0;
 	struct dmabuf_token *tokens;
+	int ret = 0, num_frags = 0;
 	netmem_ref netmems[16];
-	int ret = 0;
 
 	if (!sk_is_tcp(sk))
 		return -EBADF;
 
-	if (optlen % sizeof(struct dmabuf_token) ||
+	if (optlen % sizeof(*tokens) ||
 	    optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
 		return -EINVAL;
 
-	tokens = kvmalloc_array(optlen, sizeof(*tokens), GFP_KERNEL);
+	num_tokens = optlen / sizeof(*tokens);
+	tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
 	if (!tokens)
 		return -ENOMEM;
 
-	num_tokens = optlen / sizeof(struct dmabuf_token);
 	if (copy_from_sockptr(tokens, optval, optlen)) {
 		kvfree(tokens);
 		return -EFAULT;
@@ -1086,24 +1088,28 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
 	xa_lock_bh(&sk->sk_user_frags);
 	for (i = 0; i < num_tokens; i++) {
 		for (j = 0; j < tokens[i].token_count; j++) {
+			if (++num_frags > MAX_DONTNEED_FRAGS)
+				goto frag_limit_reached;
+
 			netmem_ref netmem = (__force netmem_ref)__xa_erase(
 				&sk->sk_user_frags, tokens[i].token_start + j);
 
-			if (netmem &&
-			    !WARN_ON_ONCE(!netmem_is_net_iov(netmem))) {
-				netmems[netmem_num++] = netmem;
-				if (netmem_num == ARRAY_SIZE(netmems)) {
-					xa_unlock_bh(&sk->sk_user_frags);
-					for (k = 0; k < netmem_num; k++)
-						WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
-					netmem_num = 0;
-					xa_lock_bh(&sk->sk_user_frags);
-				}
-				ret++;
+			if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+				continue;
+
+			netmems[netmem_num++] = netmem;
+			if (netmem_num == ARRAY_SIZE(netmems)) {
+				xa_unlock_bh(&sk->sk_user_frags);
+				for (k = 0; k < netmem_num; k++)
+					WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+				netmem_num = 0;
+				xa_lock_bh(&sk->sk_user_frags);
 			}
+			ret++;
 		}
 	}
 
+frag_limit_reached:
 	xa_unlock_bh(&sk->sk_user_frags);
 	for (k = 0; k < netmem_num; k++)
 		WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 242c91a6e3d3..78347d7d25ef 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -647,6 +647,8 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 	sk = __sock_map_lookup_elem(map, key);
 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 		return SK_DROP;
+	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
+		return SK_DROP;
 
 	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 	return SK_PASS;
@@ -675,6 +677,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 		return SK_DROP;
 	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 		return SK_DROP;
+	if (sk_is_vsock(sk))
+		return SK_DROP;
 
 	msg->flags = flags;
 	msg->sk_redir = sk;
@@ -1249,6 +1253,8 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
 	sk = __sock_hash_lookup_elem(map, key);
 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 		return SK_DROP;
+	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
+		return SK_DROP;
 
 	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 	return SK_PASS;
@@ -1277,6 +1283,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
 		return SK_DROP;
 	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 		return SK_DROP;
+	if (sk_is_vsock(sk))
+		return SK_DROP;
 
 	msg->flags = flags;
 	msg->sk_redir = sk;
@@ -1752,6 +1760,10 @@ static int sock_map_link_update_prog(struct bpf_link *link,
 		ret = -EINVAL;
 		goto out;
 	}
+	if (!sockmap_link->map) {
+		ret = -ENOLINK;
+		goto out;
+	}
 
 	ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
 					sockmap_link->attach_type);
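The reworked sock_devmem_dontneed() loop above collects netmem references into a small on-stack array and drops the xarray lock while it releases each full batch, keeping the critical section short even for large token counts. A self-contained sketch of that collect-unlock-flush pattern, with a pthread mutex standing in for xa_lock_bh() and put_item/release_all as made-up names:

#include <pthread.h>

#define BATCH 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void put_item(void *item)
{
	(void)item;	/* expensive release; must not run under the lock */
}

/* Gather items under the lock, but flush them in batches with the lock
 * dropped, then re-take it; mirrors the netmems[16] handling above. */
static void release_all(void **items, unsigned int n)
{
	void *batch[BATCH];
	unsigned int pending = 0;

	pthread_mutex_lock(&lock);
	for (unsigned int i = 0; i < n; i++) {
		batch[pending++] = items[i];
		if (pending == BATCH) {
			pthread_mutex_unlock(&lock);
			for (unsigned int k = 0; k < pending; k++)
				put_item(batch[k]);
			pending = 0;
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	for (unsigned int k = 0; k < pending; k++)
		put_item(batch[k]);	/* final partial batch */
}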
diff --git a/net/core/tso.c b/net/core/tso.c
index e00796e3b146..6df997b9076e 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -3,7 +3,7 @@
 #include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/tso.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
 		   int size, bool is_last)
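Several of the files above only swap <asm/unaligned.h> for <linux/unaligned.h>; the helpers themselves behave the same. For reference, a userspace approximation of what a get_unaligned_be16()-style helper does, using memcpy so the compiler emits a safe load on strict-alignment CPUs (the _demo suffix marks this as an assumption, not the kernel's implementation):

#include <stdint.h>
#include <string.h>

static inline uint16_t get_unaligned_be16_demo(const void *p)
{
	uint8_t b[2];

	memcpy(b, p, sizeof(b));	/* safe even if p is misaligned */
	return (uint16_t)(b[0] << 8 | b[1]);
}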