Diffstat (limited to 'net')
-rw-r--r--net/batman-adv/translation-table.c58
-rw-r--r--net/bluetooth/hci_core.c10
-rw-r--r--net/bluetooth/hci_event.c33
-rw-r--r--net/bluetooth/hci_sock.c14
-rw-r--r--net/bluetooth/iso.c75
-rw-r--r--net/bluetooth/l2cap_core.c12
-rw-r--r--net/bluetooth/l2cap_sock.c20
-rw-r--r--net/bluetooth/rfcomm/core.c6
-rw-r--r--net/bluetooth/rfcomm/sock.c9
-rw-r--r--net/bluetooth/sco.c52
-rw-r--r--net/can/j1939/transport.c2
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/dev.c22
-rw-r--r--net/core/filter.c86
-rw-r--r--net/core/link_watch.c7
-rw-r--r--net/core/net_namespace.c20
-rw-r--r--net/core/netdev-genl.c25
-rw-r--r--net/core/rtnetlink.c47
-rw-r--r--net/core/skmsg.c11
-rw-r--r--net/core/sock.c5
-rw-r--r--net/core/sock_map.c6
-rw-r--r--net/dccp/feat.c6
-rw-r--r--net/dsa/tag.h16
-rw-r--r--net/dsa/tag_ocelot_8021q.c2
-rw-r--r--net/ethtool/bitset.c48
-rw-r--r--net/ethtool/ioctl.c3
-rw-r--r--net/handshake/handshake-test.c2
-rw-r--r--net/hsr/hsr_device.c19
-rw-r--r--net/hsr/hsr_forward.c2
-rw-r--r--net/ipv4/datagram.c8
-rw-r--r--net/ipv4/icmp.c3
-rw-r--r--net/ipv4/ip_tunnel.c6
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/ipv4/tcp_bpf.c25
-rw-r--r--net/ipv4/tcp_input.c1
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/udp.c14
-rw-r--r--net/ipv6/addrconf.c13
-rw-r--r--net/ipv6/ila/ila_xlat.c16
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/llc/llc_input.c2
-rw-r--r--net/mac80211/cfg.c17
-rw-r--r--net/mac80211/ieee80211_i.h49
-rw-r--r--net/mac80211/iface.c23
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/tests/elems.c2
-rw-r--r--net/mac80211/tests/mfp.c2
-rw-r--r--net/mac80211/tests/tpe.c2
-rw-r--r--net/mac80211/util.c26
-rw-r--r--net/mctp/route.c36
-rw-r--r--net/mctp/test/route-test.c86
-rw-r--r--net/mptcp/options.c7
-rw-r--r--net/mptcp/protocol.c23
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c4
-rw-r--r--net/netfilter/nf_tables_api.c32
-rw-r--r--net/netfilter/nft_inner.c57
-rw-r--r--net/netfilter/nft_set_hash.c16
-rw-r--r--net/netfilter/nft_socket.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c52
-rw-r--r--net/netfilter/xt_LED.c4
-rw-r--r--net/netrom/nr_route.c6
-rw-r--r--net/packet/af_packet.c28
-rw-r--r--net/psample/psample.c9
-rw-r--r--net/rfkill/rfkill-gpio.c2
-rw-r--r--net/sched/cls_flower.c5
-rw-r--r--net/sched/sch_cake.c2
-rw-r--r--net/sched/sch_choke.c2
-rw-r--r--net/sched/sch_netem.c22
-rw-r--r--net/sched/sch_tbf.c18
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/smc/af_smc.c24
-rw-r--r--net/smc/smc_clc.c17
-rw-r--r--net/smc/smc_clc.h22
-rw-r--r--net/smc/smc_core.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_test.c2
-rw-r--r--net/tipc/udp_media.c7
-rw-r--r--net/unix/af_unix.c1
-rw-r--r--net/vmw_vsock/af_vsock.c70
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/wireless/sme.c1
-rw-r--r--net/wireless/tests/chan.c2
-rw-r--r--net/wireless/tests/scan.c2
-rw-r--r--net/wireless/util.c3
-rw-r--r--net/xdp/xsk_buff_pool.c5
-rw-r--r--net/xdp/xskmap.c2
92 files changed, 950 insertions, 507 deletions
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b44c382226a1..760d51fdbdf6 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -948,16 +948,25 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
int tt_diff_len, tt_change_len = 0;
int tt_diff_entries_num = 0;
int tt_diff_entries_count = 0;
+ bool drop_changes = false;
+ size_t tt_extra_len = 0;
u16 tvlv_len;
tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
tt_diff_len = batadv_tt_len(tt_diff_entries_num);
/* if we have too many changes for one packet don't send any
- * and wait for the tt table request which will be fragmented
+ * and wait for the tt table request so we can reply with the full
+ * (fragmented) table.
+ *
+ * The local change history should still be cleaned up so the next
+ * TT round can start again with a clean state.
*/
- if (tt_diff_len > bat_priv->soft_iface->mtu)
+ if (tt_diff_len > bat_priv->soft_iface->mtu) {
tt_diff_len = 0;
+ tt_diff_entries_num = 0;
+ drop_changes = true;
+ }
tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
&tt_change, &tt_diff_len);
@@ -966,7 +975,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_data->flags = BATADV_TT_OGM_DIFF;
- if (tt_diff_len == 0)
+ if (!drop_changes && tt_diff_len == 0)
goto container_register;
spin_lock_bh(&bat_priv->tt.changes_list_lock);
@@ -985,6 +994,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+ tt_extra_len = batadv_tt_len(tt_diff_entries_num -
+ tt_diff_entries_count);
+
/* Keep the buffer for possible tt_request */
spin_lock_bh(&bat_priv->tt.last_changeset_lock);
kfree(bat_priv->tt.last_changeset);
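Review bot: `tt_extra_len` at the `batadv_tt_len(tt_diff_entries_num - tt_diff_entries_count)` line is non-negative only if the copy loop above never counts more entries than `tt_diff_entries_num`, and most of that loop lies outside the hunk. The value is later subtracted from the `u16 tvlv_len` (the `tvlv_len -= tt_extra_len` hunk just before `container_register`) and from `tt_diff_len`, so a broken invariant would wrap the advertised TVLV length instead of failing loudly. In the `drop_changes` case `tt_diff_entries_num` has been forced to 0, so the invariant then depends on the loop copying nothing. I would state it next to the computation, e.g. `/* count never exceeds num: the loop above copies at most tt_diff_entries_num entries */`, after confirming it against the loop body.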
@@ -993,6 +1005,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_change_len = batadv_tt_len(tt_diff_entries_count);
/* check whether this new OGM has no changes due to size problems */
if (tt_diff_entries_count > 0) {
+ tt_diff_len -= tt_extra_len;
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
@@ -1005,6 +1018,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+ /* Remove extra packet space for OGM */
+ tvlv_len -= tt_extra_len;
container_register:
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
tvlv_len);
@@ -2705,14 +2720,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
*
* Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
* is not provided then this becomes a no-op.
+ *
+ * Return: Remaining unused length in tvlv_buff.
*/
-static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
- struct batadv_hashtable *hash,
- void *tvlv_buff, u16 tt_len,
- bool (*valid_cb)(const void *,
- const void *,
- u8 *flags),
- void *cb_data)
+static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ void *tvlv_buff, u16 tt_len,
+ bool (*valid_cb)(const void *,
+ const void *,
+ u8 *flags),
+ void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tvlv_tt_change *tt_change;
@@ -2726,7 +2743,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
tt_change = tvlv_buff;
if (!valid_cb)
- return;
+ return tt_len;
rcu_read_lock();
for (i = 0; i < hash->size; i++) {
@@ -2752,6 +2769,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
}
}
rcu_read_unlock();
+
+ return batadv_tt_len(tt_tot - tt_num_entries);
}
/**
@@ -3022,10 +3041,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
goto out;
/* fill the rest of the tvlv with the real TT entries */
- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
- tt_change, tt_len,
- batadv_tt_global_valid,
- req_dst_orig_node);
+ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
+ bat_priv->tt.global_hash,
+ tt_change, tt_len,
+ batadv_tt_global_valid,
+ req_dst_orig_node);
}
/* Don't send the response, if larger than fragmented packet. */
@@ -3149,9 +3169,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
goto out;
/* fill the rest of the tvlv with the real TT entries */
- batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
- tt_change, tt_len,
- batadv_tt_local_valid, NULL);
+ tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
+ bat_priv->tt.local_hash,
+ tt_change, tt_len,
+ batadv_tt_local_valid,
+ NULL);
}
tvlv_tt_data->flags = BATADV_TT_RESPONSE;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f9e19f9cb5a3..18ab5628f85a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -57,7 +57,6 @@ DEFINE_RWLOCK(hci_dev_list_lock);
/* HCI callback list */
LIST_HEAD(hci_cb_list);
-DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
@@ -2993,9 +2992,7 @@ int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- mutex_lock(&hci_cb_list_lock);
- list_add_tail(&cb->list, &hci_cb_list);
- mutex_unlock(&hci_cb_list_lock);
+ list_add_tail_rcu(&cb->list, &hci_cb_list);
return 0;
}
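Review bot: `list_add_tail_rcu()` in hci_register_cb() now runs with no update-side lock, and RCU list primitives protect readers only, not concurrent writers. The same holds for `list_del_rcu()` in the hci_unregister_cb() hunk below. Unless registration and unregistration are serialized somewhere not shown on this page, two protocol modules loading or unloading at the same time can corrupt `hci_cb_list`. Keeping the removed mutex for writers would close that gap: `mutex_lock(&hci_cb_list_lock); list_add_tail_rcu(&cb->list, &hci_cb_list); mutex_unlock(&hci_cb_list_lock);`, with the matching lock around `list_del_rcu()` and `synchronize_rcu()` called after the unlock.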
@@ -3005,9 +3002,8 @@ int hci_unregister_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- mutex_lock(&hci_cb_list_lock);
- list_del(&cb->list);
- mutex_unlock(&hci_cb_list_lock);
+ list_del_rcu(&cb->list);
+ synchronize_rcu();
return 0;
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1427d6e2f3c9..2cc7a9306350 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -6870,38 +6870,27 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
return;
hci_dev_lock(hdev);
- rcu_read_lock();
/* Connect all BISes that are bound to the BIG */
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (bacmp(&conn->dst, BDADDR_ANY) ||
- conn->type != ISO_LINK ||
- conn->iso_qos.bcast.big != ev->handle)
+ while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
+ BT_BOUND))) {
+ if (ev->status) {
+ hci_connect_cfm(conn, ev->status);
+ hci_conn_del(conn);
continue;
+ }
if (hci_conn_set_handle(conn,
__le16_to_cpu(ev->bis_handle[i++])))
continue;
- if (!ev->status) {
- conn->state = BT_CONNECTED;
- set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
- rcu_read_unlock();
- hci_debugfs_create_conn(conn);
- hci_conn_add_sysfs(conn);
- hci_iso_setup_path(conn);
- rcu_read_lock();
- continue;
- }
-
- hci_connect_cfm(conn, ev->status);
- rcu_read_unlock();
- hci_conn_del(conn);
- rcu_read_lock();
+ conn->state = BT_CONNECTED;
+ set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
+ hci_debugfs_create_conn(conn);
+ hci_conn_add_sysfs(conn);
+ hci_iso_setup_path(conn);
}
- rcu_read_unlock();
-
if (!ev->status && !i)
/* If no BISes have been connected for the BIG,
* terminate. This is in case all bound connections
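Review bot: The new `while` loop ends only when no `BT_BOUND` connection remains for the BIG, but the `hci_conn_set_handle()` failure path does `continue` without changing or deleting `conn`. Unless that helper changes the state on failure (it is not shown here), the next lookup can return the same connection. Each retry also advances `i`, so `ev->bis_handle[i++]` reads further into the event, and no comparison against the event's BIS count is visible in the hunk. The failure branch should take the connection out of the lookup set, for example by reporting the error and calling `hci_conn_del(conn)` as the `ev->status` branch does, and `i` should be bounded before indexing. The function begins and ends outside the hunk.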
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 2272e1849ebd..022b86797acd 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1926,7 +1926,7 @@ drop:
}
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -1943,7 +1943,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
switch (optname) {
case HCI_DATA_DIR:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -1954,7 +1954,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
break;
case HCI_TIME_STAMP:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -1974,7 +1974,7 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
- err = bt_copy_from_sockptr(&uf, sizeof(uf), optval, len);
+ err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
if (err)
break;
@@ -2005,7 +2005,7 @@ done:
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
@@ -2015,7 +2015,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
if (level == SOL_HCI)
return hci_sock_setsockopt_old(sock, level, optname, optval,
- len);
+ optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
@@ -2035,7 +2035,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
goto done;
}
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 1b40fd2b2f02..43d0ebe11100 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1129,6 +1129,7 @@ static int iso_listen_bis(struct sock *sk)
return -EHOSTUNREACH;
hci_dev_lock(hdev);
+ lock_sock(sk);
/* Fail if user set invalid QoS */
if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) {
@@ -1158,10 +1159,10 @@ static int iso_listen_bis(struct sock *sk)
goto unlock;
}
- hci_dev_put(hdev);
-
unlock:
+ release_sock(sk);
hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
return err;
}
@@ -1188,6 +1189,7 @@ static int iso_sock_listen(struct socket *sock, int backlog)
BT_DBG("sk %p backlog %d", sk, backlog);
+ sock_hold(sk);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
@@ -1200,10 +1202,16 @@ static int iso_sock_listen(struct socket *sock, int backlog)
goto done;
}
- if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY))
+ if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) {
err = iso_listen_cis(sk);
- else
+ } else {
+ /* Drop sock lock to avoid potential
+ * deadlock with the hdev lock.
+ */
+ release_sock(sk);
err = iso_listen_bis(sk);
+ lock_sock(sk);
+ }
if (err)
goto done;
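Review bot: The `sk->sk_state != BT_BOUND` test is made before `release_sock(sk)`, and nothing visible re-checks it after `iso_listen_bis()` returns and `lock_sock(sk)` is taken again. `sock_hold()` keeps the socket memory alive, but another thread can still connect, close or re-listen on the socket in that window, and the code after the hunk then proceeds on the stale state. Repeating the check after re-locking, with the same error the earlier check uses, closes the window, e.g. `if (!err && sk->sk_state != BT_BOUND) err = -EINVAL;`. The iso_sock_recvmsg() hunk further down drops the lock the same way around `iso_conn_big_sync()` and `iso_connect_cis()` and then writes `sk->sk_state = BT_LISTEN` unconditionally, so it needs the same re-validation.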
@@ -1215,6 +1223,7 @@ static int iso_sock_listen(struct socket *sock, int backlog)
done:
release_sock(sk);
+ sock_put(sk);
return err;
}
@@ -1226,7 +1235,11 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock,
long timeo;
int err = 0;
- lock_sock(sk);
+ /* Use explicit nested locking to avoid lockdep warnings generated
+ * because the parent socket and the child socket are locked on the
+ * same thread.
+ */
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
@@ -1257,7 +1270,7 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock,
release_sock(sk);
timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1398,6 +1411,7 @@ static void iso_conn_big_sync(struct sock *sk)
* change.
*/
hci_dev_lock(hdev);
+ lock_sock(sk);
if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
@@ -1410,6 +1424,7 @@ static void iso_conn_big_sync(struct sock *sk)
err);
}
+ release_sock(sk);
hci_dev_unlock(hdev);
}
@@ -1418,39 +1433,57 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct iso_pinfo *pi = iso_pi(sk);
+ bool early_ret = false;
+ int err = 0;
BT_DBG("sk %p", sk);
if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ sock_hold(sk);
lock_sock(sk);
+
switch (sk->sk_state) {
case BT_CONNECT2:
if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
+ release_sock(sk);
iso_conn_big_sync(sk);
+ lock_sock(sk);
+
sk->sk_state = BT_LISTEN;
} else {
iso_conn_defer_accept(pi->conn->hcon);
sk->sk_state = BT_CONFIG;
}
- release_sock(sk);
- return 0;
+
+ early_ret = true;
+ break;
case BT_CONNECTED:
if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+ release_sock(sk);
iso_conn_big_sync(sk);
+ lock_sock(sk);
+
sk->sk_state = BT_LISTEN;
- release_sock(sk);
- return 0;
+ early_ret = true;
}
- release_sock(sk);
break;
case BT_CONNECT:
release_sock(sk);
- return iso_connect_cis(sk);
+ err = iso_connect_cis(sk);
+ lock_sock(sk);
+
+ early_ret = true;
+ break;
default:
- release_sock(sk);
break;
}
+
+ release_sock(sk);
+ sock_put(sk);
+
+ if (early_ret)
+ return err;
}
return bt_sock_recvmsg(sock, msg, len, flags);
@@ -1566,7 +1599,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -1577,7 +1610,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -1596,7 +1629,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen);
+ err = copy_safe_from_sockptr(&qos, sizeof(qos), optval, optlen);
if (err)
break;
@@ -1617,8 +1650,8 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval,
- optlen);
+ err = copy_safe_from_sockptr(iso_pi(sk)->base, optlen, optval,
+ optlen);
if (err)
break;
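Review bot: In the call that fills `iso_pi(sk)->base`, `optlen` is passed as both the destination size and the source length, so the size check inside `copy_safe_from_sockptr()` can never trigger here. The bound on the `base` buffer therefore rests entirely on whatever check precedes this call, which is outside the hunk. I would record that at the call site, `/* optlen was bounded against sizeof(iso_pi(sk)->base) above */`, so that a later edit to the earlier check is not assumed to be backed up by the helper.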
@@ -2118,6 +2151,11 @@ done:
return HCI_LM_ACCEPT;
}
+static bool iso_match(struct hci_conn *hcon)
+{
+ return hcon->type == ISO_LINK || hcon->type == LE_LINK;
+}
+
static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
{
if (hcon->type != ISO_LINK) {
@@ -2299,6 +2337,7 @@ drop:
static struct hci_cb iso_cb = {
.name = "ISO",
+ .match = iso_match,
.connect_cfm = iso_connect_cfm,
.disconn_cfm = iso_disconn_cfm,
};
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6544c1ed7143..27b4c4a2ba1f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7217,6 +7217,11 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
return NULL;
}
+static bool l2cap_match(struct hci_conn *hcon)
+{
+ return hcon->type == ACL_LINK || hcon->type == LE_LINK;
+}
+
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
struct hci_dev *hdev = hcon->hdev;
@@ -7224,9 +7229,6 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan *pchan;
u8 dst_type;
- if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
- return;
-
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (status) {
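Review bot: l2cap_connect_cfm() no longer checks `hcon->type` itself, so it is correct only if every caller consults `l2cap_cb.match` first, and that dispatch code is not part of this diff view. The same applies to l2cap_disconn_cfm() in the next hunk and to sco_connect_cfm() and sco_disconn_cfm() in net/bluetooth/sco.c. A one-line comment on each callback, e.g. `/* Link type is filtered by l2cap_match() before this is called */`, would record the dependency for anyone invoking these handlers directly. Both functions continue outside the hunk.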
@@ -7291,9 +7293,6 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
- if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
- return;
-
BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason));
@@ -7572,6 +7571,7 @@ drop:
static struct hci_cb l2cap_cb = {
.name = "L2CAP",
+ .match = l2cap_match,
.connect_cfm = l2cap_connect_cfm,
.disconn_cfm = l2cap_disconn_cfm,
.security_cfm = l2cap_security_cfm,
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 18e89e764f3b..3d2553dcdb1b 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -755,7 +755,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
opts.max_tx = chan->max_tx;
opts.txwin_size = chan->tx_win;
- err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
+ err = copy_safe_from_sockptr(&opts, sizeof(opts), optval,
+ optlen);
if (err)
break;
@@ -800,7 +801,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
break;
case L2CAP_LM:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -909,7 +910,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen);
if (err)
break;
@@ -956,7 +957,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -970,7 +971,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_FLUSHABLE:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -1004,7 +1005,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
- err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
+ err = copy_safe_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
if (err)
break;
@@ -1015,7 +1016,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_CHANNEL_POLICY:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -1046,7 +1047,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
+ err = copy_safe_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
if (err)
break;
@@ -1076,7 +1077,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
+ err = copy_safe_from_sockptr(&mode, sizeof(mode), optval,
+ optlen);
if (err)
break;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ad5177e3a69b..4c56ca5a216c 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2134,6 +2134,11 @@ static int rfcomm_run(void *unused)
return 0;
}
+static bool rfcomm_match(struct hci_conn *hcon)
+{
+ return hcon->type == ACL_LINK;
+}
+
static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
{
struct rfcomm_session *s;
@@ -2180,6 +2185,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
static struct hci_cb rfcomm_cb = {
.name = "RFCOMM",
+ .match = rfcomm_match,
.security_cfm = rfcomm_security_cfm
};
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 40766f8119ed..913402806fa0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -629,10 +629,9 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
switch (optname) {
case RFCOMM_LM:
- if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt & RFCOMM_LM_FIPS) {
err = -EINVAL;
@@ -685,7 +684,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen);
if (err)
break;
@@ -703,7 +702,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 78f7bca24487..aa7bfe26cb40 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -319,10 +319,13 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
- err = -EOPNOTSUPP;
- goto unlock;
+ switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ break;
}
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
@@ -896,7 +899,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -915,18 +918,11 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
voice.setting = sco_pi(sk)->setting;
- err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
- optlen);
+ err = copy_safe_from_sockptr(&voice, sizeof(voice), optval,
+ optlen);
if (err)
break;
- /* Explicitly check for these values */
- if (voice.setting != BT_VOICE_TRANSPARENT &&
- voice.setting != BT_VOICE_CVSD_16BIT) {
- err = -EINVAL;
- break;
- }
-
sco_pi(sk)->setting = voice.setting;
hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
BDADDR_BREDR);
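Review bot: With the `BT_VOICE_TRANSPARENT` / `BT_VOICE_CVSD_16BIT` check removed, `voice.setting` copied from userspace is stored in `sco_pi(sk)->setting` with no validation anywhere in this hunk. The new `switch` statements, here and in sco_connect(), look only at `setting & SCO_AIRMODE_MASK` and have no `default`, so every other bit pattern passes through silently. If widening the accepted settings is the intent, a comment should say which values are now valid and where unsupported ones are expected to be rejected. Otherwise reserved values should be refused before the assignment. sco_sock_setsockopt() starts and ends outside the hunk.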
@@ -934,14 +930,19 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
err = -EBADFD;
break;
}
- if (enhanced_sync_conn_capable(hdev) &&
- voice.setting == BT_VOICE_TRANSPARENT)
- sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
+
+ switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (enhanced_sync_conn_capable(hdev))
+ sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
+ break;
+ }
+
hci_dev_put(hdev);
break;
case BT_PKT_STATUS:
- err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
if (err)
break;
@@ -984,7 +985,8 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
+ err = copy_struct_from_sockptr(buffer, sizeof(buffer), optval,
+ optlen);
if (err) {
hci_dev_put(hdev);
break;
@@ -1396,11 +1398,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
return lm;
}
-static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+static bool sco_match(struct hci_conn *hcon)
{
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return;
+ return hcon->type == SCO_LINK || hcon->type == ESCO_LINK;
+}
+static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+{
BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
if (!status) {
@@ -1415,9 +1419,6 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return;
-
BT_DBG("hcon %p reason %d", hcon, reason);
sco_conn_del(hcon, bt_to_errno(reason));
@@ -1443,6 +1444,7 @@ drop:
static struct hci_cb sco_cb = {
.name = "SCO",
+ .match = sco_match,
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
};
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 319f47df3330..95f7a7e65a73 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1505,7 +1505,7 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
session->state = J1939_SESSION_NEW;
skb_queue_head_init(&session->skb_queue);
- skb_queue_tail(&session->skb_queue, skb);
+ skb_queue_tail(&session->skb_queue, skb_get(skb));
skcb = j1939_skb_to_cb(skb);
memcpy(&session->skcb, skcb, sizeof(session->skcb));
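Review bot: The extra reference taken by `skb_get(skb)` is not explained, and the ownership rule it implies is easy to break later. After this change the session queue holds its own reference while the caller of j1939_session_new() keeps the one it passed in, so callers must still release theirs; those callers are outside the hunk. A comment above the call, such as `/* queue takes its own ref; the caller still owns and must release skb */`, would make that explicit.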
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9b1168eb77ab..b24afec24138 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1173,6 +1173,8 @@ EXPORT_SYMBOL(ceph_osdc_new_request);
int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{
+ WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ);
+
op->extent.sparse_ext_cnt = cnt;
op->extent.sparse_ext = kmalloc_array(cnt,
sizeof(*op->extent.sparse_ext),
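Review bot: `WARN_ON()` only logs, so the function still writes `op->extent.sparse_ext_cnt` and `op->extent.sparse_ext` when `op->op` is not `CEPH_OSD_OP_SPARSE_READ`. If `extent` shares storage with the fields of other op types (the definition is not on this page), that overwrites unrelated request state right after the warning. Since the function returns int, bailing out is cheap: `if (WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ)) return -EINVAL;`. The body continues past the hunk.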
diff --git a/net/core/dev.c b/net/core/dev.c
index 13d00fc10f55..faa23042df38 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3642,8 +3642,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
- skb_network_header_len(skb) != sizeof(struct ipv6hdr))
+ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
+ !ipv6_has_hopopt_jumbo(skb))
goto sw_checksum;
+
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
case offsetof(struct udphdr, check):
@@ -6557,18 +6559,22 @@ static void __napi_hash_add_with_id(struct napi_struct *napi,
static void napi_hash_add_with_id(struct napi_struct *napi,
unsigned int napi_id)
{
- spin_lock(&napi_hash_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&napi_hash_lock, flags);
WARN_ON_ONCE(napi_by_id(napi_id));
__napi_hash_add_with_id(napi, napi_id);
- spin_unlock(&napi_hash_lock);
+ spin_unlock_irqrestore(&napi_hash_lock, flags);
}
static void napi_hash_add(struct napi_struct *napi)
{
+ unsigned long flags;
+
if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
return;
- spin_lock(&napi_hash_lock);
+ spin_lock_irqsave(&napi_hash_lock, flags);
/* 0..NR_CPUS range is reserved for sender_cpu use */
do {
@@ -6578,7 +6584,7 @@ static void napi_hash_add(struct napi_struct *napi)
__napi_hash_add_with_id(napi, napi_gen_id);
- spin_unlock(&napi_hash_lock);
+ spin_unlock_irqrestore(&napi_hash_lock, flags);
}
/* Warning : caller is responsible to make sure rcu grace period
@@ -6586,11 +6592,13 @@ static void napi_hash_add(struct napi_struct *napi)
*/
static void napi_hash_del(struct napi_struct *napi)
{
- spin_lock(&napi_hash_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&napi_hash_lock, flags);
hlist_del_init_rcu(&napi->napi_hash_node);
- spin_unlock(&napi_hash_lock);
+ spin_unlock_irqrestore(&napi_hash_lock, flags);
}
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
diff --git a/net/core/filter.c b/net/core/filter.c
index 6625b3f563a4..834614071727 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3734,13 +3734,22 @@ static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
- u32 min_len = skb_network_offset(skb);
+ int offset = skb_network_offset(skb);
+ u32 min_len = 0;
- if (skb_transport_header_was_set(skb))
- min_len = skb_transport_offset(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- min_len = skb_checksum_start_offset(skb) +
- skb->csum_offset + sizeof(__sum16);
+ if (offset > 0)
+ min_len = offset;
+ if (skb_transport_header_was_set(skb)) {
+ offset = skb_transport_offset(skb);
+ if (offset > 0)
+ min_len = offset;
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ offset = skb_checksum_start_offset(skb) +
+ skb->csum_offset + sizeof(__sum16);
+ if (offset > 0)
+ min_len = offset;
+ }
return min_len;
}
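Review bot: The `CHECKSUM_PARTIAL` branch adds `sizeof(__sum16)` to the offsets, so the sum is computed as an unsigned `size_t` and becomes signed again only through the implicit narrowing to `int offset`. The `offset > 0` test that follows relies on that conversion. Writing `(int)sizeof(__sum16)` keeps the arithmetic signed throughout. A short comment such as `/* header offsets can be negative; only positive ones raise the minimum length */` would also explain why all three assignments are guarded.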
@@ -7899,42 +7908,37 @@ static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
#endif /* CONFIG_INET */
-bool bpf_helper_changes_pkt_data(void *func)
-{
- if (func == bpf_skb_vlan_push ||
- func == bpf_skb_vlan_pop ||
- func == bpf_skb_store_bytes ||
- func == bpf_skb_change_proto ||
- func == bpf_skb_change_head ||
- func == sk_skb_change_head ||
- func == bpf_skb_change_tail ||
- func == sk_skb_change_tail ||
- func == bpf_skb_adjust_room ||
- func == sk_skb_adjust_room ||
- func == bpf_skb_pull_data ||
- func == sk_skb_pull_data ||
- func == bpf_clone_redirect ||
- func == bpf_l3_csum_replace ||
- func == bpf_l4_csum_replace ||
- func == bpf_xdp_adjust_head ||
- func == bpf_xdp_adjust_meta ||
- func == bpf_msg_pull_data ||
- func == bpf_msg_push_data ||
- func == bpf_msg_pop_data ||
- func == bpf_xdp_adjust_tail ||
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
- func == bpf_lwt_seg6_store_bytes ||
- func == bpf_lwt_seg6_adjust_srh ||
- func == bpf_lwt_seg6_action ||
-#endif
-#ifdef CONFIG_INET
- func == bpf_sock_ops_store_hdr_opt ||
-#endif
- func == bpf_lwt_in_push_encap ||
- func == bpf_lwt_xmit_push_encap)
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_clone_redirect:
+ case BPF_FUNC_l3_csum_replace:
+ case BPF_FUNC_l4_csum_replace:
+ case BPF_FUNC_lwt_push_encap:
+ case BPF_FUNC_lwt_seg6_action:
+ case BPF_FUNC_lwt_seg6_adjust_srh:
+ case BPF_FUNC_lwt_seg6_store_bytes:
+ case BPF_FUNC_msg_pop_data:
+ case BPF_FUNC_msg_pull_data:
+ case BPF_FUNC_msg_push_data:
+ case BPF_FUNC_skb_adjust_room:
+ case BPF_FUNC_skb_change_head:
+ case BPF_FUNC_skb_change_proto:
+ case BPF_FUNC_skb_change_tail:
+ case BPF_FUNC_skb_pull_data:
+ case BPF_FUNC_skb_store_bytes:
+ case BPF_FUNC_skb_vlan_pop:
+ case BPF_FUNC_skb_vlan_push:
+ case BPF_FUNC_store_hdr_opt:
+ case BPF_FUNC_xdp_adjust_head:
+ case BPF_FUNC_xdp_adjust_meta:
+ case BPF_FUNC_xdp_adjust_tail:
+ /* tail-called program could call any of the above */
+ case BPF_FUNC_tail_call:
return true;
-
- return false;
+ default:
+ return false;
+ }
}
const struct bpf_func_proto bpf_event_output_data_proto __weak;
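Review bot: `bpf_helper_changes_pkt_data()` carries no comment saying who depends on this list, so a new helper that resizes or rewrites packet data can be added without a matching `case` and nothing in the switch will flag it. A missing entry is a safety problem rather than a cosmetic one: the caller (presumably the verifier) would keep treating earlier packet pointers as valid after the call. I would add a header comment such as `/* Helpers that may invalidate packet pointers; every new helper that resizes, pulls or rewrites packet data must be listed here. */`. The old `#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)` and `#ifdef CONFIG_INET` guards are dropped, which is harmless for enum values but worth one line in that comment.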
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index ab150641142a..1b4d39e38084 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -45,9 +45,14 @@ static unsigned int default_operstate(const struct net_device *dev)
int iflink = dev_get_iflink(dev);
struct net_device *peer;
- if (iflink == dev->ifindex)
+ /* If called from netdev_run_todo()/linkwatch_sync_dev(),
+ * dev_net(dev) can be already freed, and RTNL is not held.
+ */
+ if (dev->reg_state == NETREG_UNREGISTERED ||
+ iflink == dev->ifindex)
return IF_OPER_DOWN;
+ ASSERT_RTNL();
peer = __dev_get_by_index(dev_net(dev), iflink);
if (!peer)
return IF_OPER_DOWN;
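Review bot: The initializer `int iflink = dev_get_iflink(dev);` on the first context line still runs before the new `dev->reg_state == NETREG_UNREGISTERED` check, so whatever dev_get_iflink() does is executed for a device that the added comment describes as possibly having a freed namespace and no RTNL. If any driver callback behind dev_get_iflink() touches peer or namespace state (not visible here), the guard comes too late. Declaring `int iflink;` and assigning it after the early return for unregistered devices would close that. The function starts above the hunk.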
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ae34ac818cda..b5cd3ae4f04c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -449,6 +449,21 @@ out_free:
goto out;
}
+static LLIST_HEAD(defer_free_list);
+
+static void net_complete_free(void)
+{
+ struct llist_node *kill_list;
+ struct net *net, *next;
+
+ /* Get the list of namespaces to free from last round. */
+ kill_list = llist_del_all(&defer_free_list);
+
+ llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
+ kmem_cache_free(net_cachep, net);
+
+}
+
static void net_free(struct net *net)
{
if (refcount_dec_and_test(&net->passive)) {
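Review bot: The new static list head and the `struct net` member it links through are both named `defer_free_list` (see `llist_add(&net->defer_free_list, &defer_free_list)` in the next hunk), which makes the two hard to tell apart; a distinct name for the head, e.g. `net_defer_free_list`, would read better. The head also deserves a comment stating the lifetime rule the design relies on: as far as these hunks show, a `struct net` queued by net_free() is only released by the net_complete_free() call of a later cleanup_net() run, after that run's rcu_barrier(). net_complete_free() ends with a stray blank line before its closing brace.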
@@ -457,7 +472,8 @@ static void net_free(struct net *net)
/* There should not be any trackers left there. */
ref_tracker_dir_exit(&net->notrefcnt_tracker);
- kmem_cache_free(net_cachep, net);
+ /* Wait for an extra rcu_barrier() before final free. */
+ llist_add(&net->defer_free_list, &defer_free_list);
}
}
@@ -642,6 +658,8 @@ static void cleanup_net(struct work_struct *work)
*/
rcu_barrier();
+ net_complete_free();
+
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 9527dd46e4dc..b0772d135efb 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -246,8 +246,12 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
rcu_read_unlock();
rtnl_unlock();
- if (err)
+ if (err) {
+ goto err_free_msg;
+ } else if (!rsp->len) {
+ err = -ENOENT;
goto err_free_msg;
+ }
return genlmsg_reply(rsp, info);
@@ -430,10 +434,10 @@ static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
u32 q_type, const struct genl_info *info)
{
- int err = 0;
+ int err;
if (!(netdev->flags & IFF_UP))
- return err;
+ return -ENOENT;
err = netdev_nl_queue_validate(netdev, q_idx, q_type);
if (err)
@@ -488,24 +492,21 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
struct netdev_nl_dump_ctx *ctx)
{
int err = 0;
- int i;
if (!(netdev->flags & IFF_UP))
return err;
- for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
- err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
NETDEV_QUEUE_TYPE_RX, info);
if (err)
return err;
- ctx->rxq_idx = i++;
}
- for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
- err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
NETDEV_QUEUE_TYPE_TX, info);
if (err)
return err;
- ctx->txq_idx = i++;
}
return err;
@@ -671,7 +672,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
i, info);
if (err)
return err;
- ctx->rxq_idx = i++;
+ ctx->rxq_idx = ++i;
}
i = ctx->txq_idx;
while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
@@ -679,7 +680,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
i, info);
if (err)
return err;
- ctx->txq_idx = i++;
+ ctx->txq_idx = ++i;
}
ctx->rxq_idx = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 58df76fe408a..d9f959c619d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3746,6 +3746,7 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
const struct rtnl_link_ops *ops,
struct net *tgt_net, struct net *link_net,
+ struct net *peer_net,
const struct nlmsghdr *nlh,
struct nlattr **tb, struct nlattr **data,
struct netlink_ext_ack *extack)
@@ -3776,8 +3777,13 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
dev->ifindex = ifm->ifi_index;
+ if (link_net)
+ net = link_net;
+ if (peer_net)
+ net = peer_net;
+
if (ops->newlink)
- err = ops->newlink(link_net ? : net, dev, tb, data, extack);
+ err = ops->newlink(net, dev, tb, data, extack);
else
err = register_netdevice(dev);
if (err < 0) {
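Review bot: In the added `if (link_net) net = link_net; if (peer_net) net = peer_net;` sequence, peer_net silently overrides link_net when a request supplies both, and nothing records that this precedence is intended. A one-line comment such as `/* peer_net, when set, takes precedence over link_net */` would make the choice reviewable, since the namespace passed to `ops->newlink()` decides where the peer device is created. The declaration of `net` and the rest of rtnl_newlink_create() lie outside the hunk.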
@@ -3812,40 +3818,34 @@ out_unregister:
goto out;
}
-static int rtnl_add_peer_net(struct rtnl_nets *rtnl_nets,
- const struct rtnl_link_ops *ops,
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
+ struct nlattr *tbp[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFLA_MAX + 1];
- struct net *net;
int err;
if (!data || !data[ops->peer_type])
- return 0;
+ return rtnl_link_get_net_ifla(tbp);
err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
if (err < 0)
- return err;
+ return ERR_PTR(err);
if (ops->validate) {
err = ops->validate(tb, NULL, extack);
if (err < 0)
- return err;
+ return ERR_PTR(err);
}
- net = rtnl_link_get_net_ifla(tb);
- if (IS_ERR(net))
- return PTR_ERR(net);
- if (net)
- rtnl_nets_add(rtnl_nets, net);
-
- return 0;
+ return rtnl_link_get_net_ifla(tb);
}
static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
const struct rtnl_link_ops *ops,
struct net *tgt_net, struct net *link_net,
+ struct net *peer_net,
struct rtnl_newlink_tbs *tbs,
struct nlattr **data,
struct netlink_ext_ack *extack)
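Review bot: rtnl_get_peer_net() now has three possible results (an ERR_PTR, NULL, or a namespace pointer from rtnl_link_get_net_ifla()) and no comment says whether the pointer carries a reference or who drops it. The caller in rtnl_newlink() hands a non-NULL result to rtnl_nets_add(), so ownership presumably moves there; a short kernel-doc block listing the return values and the reference rule would make that explicit. The `tbp` parameter next to the local `tb` is also easy to misread: the fallback path reads the outer attributes while the main path reads the nested peer attributes, and a comment on the first `return` would say so.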
@@ -3894,14 +3894,15 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EOPNOTSUPP;
}
- return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, nlh, tb, data, extack);
+ return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, peer_net, nlh,
+ tb, data, extack);
}
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct net *tgt_net, *link_net = NULL, *peer_net = NULL;
struct nlattr **tb, **linkinfo, **data = NULL;
- struct net *tgt_net, *link_net = NULL;
struct rtnl_link_ops *ops = NULL;
struct rtnl_newlink_tbs *tbs;
struct rtnl_nets rtnl_nets;
@@ -3971,9 +3972,13 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (ops->peer_type) {
- ret = rtnl_add_peer_net(&rtnl_nets, ops, data, extack);
- if (ret < 0)
+ peer_net = rtnl_get_peer_net(ops, tb, data, extack);
+ if (IS_ERR(peer_net)) {
+ ret = PTR_ERR(peer_net);
goto put_ops;
+ }
+ if (peer_net)
+ rtnl_nets_add(&rtnl_nets, peer_net);
}
}
@@ -4004,7 +4009,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
rtnl_nets_lock(&rtnl_nets);
- ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, tbs, data, extack);
+ ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, peer_net, tbs, data, extack);
rtnl_nets_unlock(&rtnl_nets);
put_net:
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index e90fbab703b2..61f3f3d4e528 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -369,8 +369,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes)
{
int ret = -ENOSPC, i = msg->sg.curr;
+ u32 copy, buf_size, copied = 0;
struct scatterlist *sge;
- u32 copy, buf_size;
void *to;
do {
@@ -397,6 +397,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
goto out;
}
bytes -= copy;
+ copied += copy;
if (!bytes)
break;
msg->sg.copybreak = 0;
@@ -404,7 +405,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
} while (i != msg->sg.end);
out:
msg->sg.curr = i;
- return ret;
+ return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
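Review bot: sk_msg_memcopy_from_iter() used to return 0 on success and now returns the number of bytes copied, yet it is an exported symbol and no comment on the function is updated in these hunks to state the new contract. Any caller that tests the result with `if (err)` rather than `if (err < 0)` would start treating a successful copy as a failure; only the tcp_bpf_sendmsg() caller is converted in this diff, so other users need checking. `copied` is a `u32` returned through an `int`, so a comment stating the upper bound on `bytes` would rule out a large count being read as an error.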
@@ -445,8 +446,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
if (likely(!peek)) {
sge->offset += copy;
sge->length -= copy;
- if (!msg_rx->skb)
+ if (!msg_rx->skb) {
sk_mem_uncharge(sk, copy);
+ atomic_sub(copy, &sk->sk_rmem_alloc);
+ }
msg_rx->sg.size -= copy;
if (!sge->length) {
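Review bot: The added `atomic_sub(copy, &sk->sk_rmem_alloc)` assumes every skb-less message on the ingress list was added to sk_rmem_alloc when it was queued, and the only matching `atomic_add()` in this diff is in bpf_tcp_ingress(). If any other path queues a message with `msg->skb == NULL` without charging it, this subtraction and the one added in __sk_psock_purge_ingress_msg() would drive the counter negative. A comment at both sites naming the charging side, e.g. `/* pairs with atomic_add() in bpf_tcp_ingress() */`, would keep the accounting auditable. The body of sk_msg_recvmsg() extends beyond the hunk.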
@@ -772,6 +775,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
list_del(&msg->list);
+ if (!msg->skb)
+ atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
sk_msg_free(psock->sk, msg);
kfree(msg);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 74729d20cd00..be84885f9290 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1295,7 +1295,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_REUSEPORT:
- sk->sk_reuseport = valbool;
+ if (valbool && !sk_is_inet(sk))
+ ret = -EOPNOTSUPP;
+ else
+ sk->sk_reuseport = valbool;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 78347d7d25ef..f1b9b3958792 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -159,6 +159,7 @@ static void sock_map_del_link(struct sock *sk,
verdict_stop = true;
list_del(&link->list);
sk_psock_free_link(link);
+ break;
}
}
spin_unlock_bh(&psock->link_lock);
@@ -411,12 +412,11 @@ static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock **psk)
{
- struct sock *sk;
+ struct sock *sk = NULL;
int err = 0;
spin_lock_bh(&stab->lock);
- sk = *psk;
- if (!sk_test || sk_test == sk)
+ if (!sk_test || sk_test == *psk)
sk = xchg(psk, NULL);
if (likely(sk))
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 54086bb05c42..f7554dcdaaba 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -1166,8 +1166,12 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
goto not_valid_or_not_known;
}
- return dccp_feat_push_confirm(fn, feat, local, &fval);
+ if (dccp_feat_push_confirm(fn, feat, local, &fval)) {
+ kfree(fval.sp.vec);
+ return DCCP_RESET_CODE_TOO_BUSY;
+ }
+ return 0;
} else if (entry->state == FEAT_UNSTABLE) { /* 6.6.2 */
return 0;
}
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index d5707870906b..5d80ddad4ff6 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -138,9 +138,10 @@ static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
* dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
* @skb: Pointer to socket buffer (packet)
*
- * Receive path method for switches which cannot avoid tagging all packets
- * towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
- * ds->untag_vlan_aware_bridge_pvid is set to true.
+ * Receive path method for switches which send some packets as VLAN-tagged
+ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
+ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
+ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
*
* As a side effect of this method, any VLAN tag from the skb head is moved
* to hwaccel.
@@ -149,14 +150,19 @@ static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
- u16 vid;
+ u16 vid, proto;
+ int err;
/* software untagging for standalone ports not yet necessary */
if (!br)
return skb;
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
/* Move VLAN tag from data to hwaccel */
- if (!skb_vlan_tag_present(skb)) {
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
skb = skb_vlan_untag(skb);
if (!skb)
return NULL;
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 8e8b1bef6af6..11ea8cfd6266 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -79,7 +79,7 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int src_port, switch_id;
+ int src_port = -1, switch_id = -1;
dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL);
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index 0515d6604b3b..f0883357d12e 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -425,12 +425,32 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits,
return 0;
}
+/**
+ * ethnl_bitmap32_equal() - Compare two bitmaps
+ * @map1: first bitmap
+ * @map2: second bitmap
+ * @nbits: bit size to compare
+ *
+ * Return: true if first @nbits are equal, false if not
+ */
+static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2,
+ unsigned int nbits)
+{
+ if (memcmp(map1, map2, nbits / 32 * sizeof(u32)))
+ return false;
+ if (nbits % 32 == 0)
+ return true;
+ return !((map1[nbits / 32] ^ map2[nbits / 32]) &
+ ethnl_lower_bits(nbits % 32));
+}
+
static int
ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
const struct nlattr *attr, struct nlattr **tb,
ethnl_string_array_t names,
struct netlink_ext_ack *extack, bool *mod)
{
+ u32 *saved_bitmap = NULL;
struct nlattr *bit_attr;
bool no_mask;
int rem;
@@ -448,8 +468,20 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
}
no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
- if (no_mask)
- ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
+ if (no_mask) {
+ unsigned int nwords = DIV_ROUND_UP(nbits, 32);
+ unsigned int nbytes = nwords * sizeof(u32);
+ bool dummy;
+
+ /* The bitmap size is only the size of the map part without
+ * its mask part.
+ */
+ saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
+ if (!saved_bitmap)
+ return -ENOMEM;
+ memcpy(saved_bitmap, bitmap, nbytes);
+ ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy);
+ }
nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
bool old_val, new_val;
@@ -458,22 +490,30 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) {
NL_SET_ERR_MSG_ATTR(extack, bit_attr,
"only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS");
+ kfree(saved_bitmap);
return -EINVAL;
}
ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask,
names, extack);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(saved_bitmap);
return ret;
+ }
old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32));
if (new_val != old_val) {
if (new_val)
bitmap[idx / 32] |= ((u32)1 << (idx % 32));
else
bitmap[idx / 32] &= ~((u32)1 << (idx % 32));
- *mod = true;
+ if (!no_mask)
+ *mod = true;
}
}
+ if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits))
+ *mod = true;
+
+ kfree(saved_bitmap);
return 0;
}
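Review bot: `saved_bitmap` is freed by hand on three separate exits (`kfree(saved_bitmap); return -EINVAL;`, `kfree(saved_bitmap); return ret;` and the final `kfree`), so the next early return added to this loop will leak it. A single exit label, with `goto out_free;` on the error paths and `out_free: kfree(saved_bitmap); return ret;` at the end, keeps the ownership in one place. In the previous hunk, `kcalloc()` followed by `memcpy()` zeroes memory that is immediately overwritten; `kmemdup()` of the same `nbytes` does it in one step. The start of the function is in an earlier hunk.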
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 61df8ce44379..7bb94875a7ec 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -993,7 +993,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
return rc;
/* Nonzero ring with RSS only makes sense if NIC adds them together */
- if (info.flow_type & FLOW_RSS && !ops->cap_rss_rxnfc_adds &&
+ if (cmd == ETHTOOL_SRXCLSRLINS && info.flow_type & FLOW_RSS &&
+ !ops->cap_rss_rxnfc_adds &&
ethtool_get_flow_spec_ring(info.fs.ring_cookie))
return -EINVAL;
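Review bot: The extended condition mixes `==`, `&` and `&&` without parentheses: `cmd == ETHTOOL_SRXCLSRLINS && info.flow_type & FLOW_RSS && ...`. It parses as intended because `&` binds tighter than `&&`, but it reads ambiguously; `(info.flow_type & FLOW_RSS)` makes the grouping explicit. The comment above still describes the check as applying to any nonzero ring with RSS and could say that it is now limited to rule insertion.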
diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c
index 34fd1d9b2db8..55442b2f518a 100644
--- a/net/handshake/handshake-test.c
+++ b/net/handshake/handshake-test.c
@@ -17,7 +17,7 @@
#include <uapi/linux/handshake.h>
#include "handshake.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static int test_accept_func(struct handshake_req *req, struct genl_info *info,
int fd)
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 31a416ee21ad..03eadd6c51fd 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -246,20 +246,22 @@ static const struct header_ops hsr_header_ops = {
.parse = eth_header_parse,
};
-static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+static struct sk_buff *hsr_init_skb(struct hsr_port *master, int extra)
{
struct hsr_priv *hsr = master->hsr;
struct sk_buff *skb;
int hlen, tlen;
+ int len;
hlen = LL_RESERVED_SPACE(master->dev);
tlen = master->dev->needed_tailroom;
+ len = sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload);
/* skb size is same for PRP/HSR frames, only difference
* being, for PRP it is a trailer and for HSR it is a
- * header
+ * header.
+ * RedBox might use @extra more bytes.
*/
- skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
- sizeof(struct hsr_sup_payload) + hlen + tlen);
+ skb = dev_alloc_skb(len + extra + hlen + tlen);
if (!skb)
return skb;
@@ -295,6 +297,7 @@ static void send_hsr_supervision_frame(struct hsr_port *port,
struct hsr_sup_tlv *hsr_stlv;
struct hsr_sup_tag *hsr_stag;
struct sk_buff *skb;
+ int extra = 0;
*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
if (hsr->announce_count < 3 && hsr->prot_version == 0) {
@@ -303,7 +306,11 @@ static void send_hsr_supervision_frame(struct hsr_port *port,
hsr->announce_count++;
}
- skb = hsr_init_skb(port);
+ if (hsr->redbox)
+ extra = sizeof(struct hsr_sup_tlv) +
+ sizeof(struct hsr_sup_payload);
+
+ skb = hsr_init_skb(port, extra);
if (!skb) {
netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n");
return;
@@ -362,7 +369,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
struct hsr_sup_tag *hsr_stag;
struct sk_buff *skb;
- skb = hsr_init_skb(master);
+ skb = hsr_init_skb(master, 0);
if (!skb) {
netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
return;
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index aa6acebc7c1e..87bb3a91598e 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -700,6 +700,8 @@ static int fill_frame_info(struct hsr_frame_info *frame,
frame->is_vlan = true;
if (frame->is_vlan) {
+ if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr))
+ return -EINVAL;
vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index cc6d0bd7b0a9..4aca1f05edd3 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -61,15 +61,17 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
err = -EACCES;
goto out;
}
+
+ /* Update addresses before rehashing */
+ inet->inet_daddr = fl4->daddr;
+ inet->inet_dport = usin->sin_port;
if (!inet->inet_saddr)
- inet->inet_saddr = fl4->saddr; /* Update source address */
+ inet->inet_saddr = fl4->saddr;
if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = fl4->saddr;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
}
- inet->inet_daddr = fl4->daddr;
- inet->inet_dport = usin->sin_port;
reuseport_has_conns_set(sk);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4f088fa1c2f2..963a89ae9c26 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -517,6 +517,9 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
if (!IS_ERR(dst)) {
if (rt != rt2)
return rt;
+ if (inet_addr_type_dev_table(net, route_lookup_dev,
+ fl4->daddr) == RTN_LOCAL)
+ return rt;
} else if (PTR_ERR(dst) == -EPERM) {
rt = NULL;
} else {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 25505f9b724c..09b73acf037a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
- iph->tos & INET_DSCP_MASK, dev_net(dev),
+ iph->tos & INET_DSCP_MASK, tunnel->net,
tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
@@ -611,7 +611,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id),
- tos & INET_DSCP_MASK, dev_net(dev), 0, skb->mark,
+ tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
skb_get_hash(skb), key->flow_flags);
if (!tunnel_hlen)
@@ -774,7 +774,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, tos & INET_DSCP_MASK,
- dev_net(dev), READ_ONCE(tunnel->parms.link),
+ tunnel->net, READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c5b8ec5c0a8c..99d8faa508e5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -122,7 +122,7 @@ static void ipmr_expire_process(struct timer_list *t);
static bool ipmr_can_free_table(struct net *net)
{
- return !check_net(net) || !net->ipv4.mr_rules_ops;
+ return !check_net(net) || !net_initialized(net);
}
static struct mr_table *ipmr_mr_table_iter(struct net *net,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e5603e84b20d..0fbec3509618 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2478,7 +2478,8 @@ ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
int our = 0;
if (!in_dev)
- return -EINVAL;
+ return reason;
+
our = ip_check_mc_rcu(in_dev, daddr, saddr,
ip_hdr(skb)->protocol);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 370993c03d31..47f65b1b70ca 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
sge = sk_msg_elem(msg, i);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
- if (!sk_wmem_schedule(sk, size)) {
+ if (!__sk_rmem_schedule(sk, size, false)) {
if (!copied)
ret = -ENOMEM;
break;
}
sk_mem_charge(sk, size);
+ atomic_add(size, &sk->sk_rmem_alloc);
sk_msg_xfer(tmp, msg, i, size);
copied += size;
if (sge->length)
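Review bot: `__sk_rmem_schedule(sk, size, false)` followed by `atomic_add(size, &sk->sk_rmem_alloc)` charges redirected data to the receive side, but nothing in this hunk compares the running sk_rmem_alloc with `sk->sk_rcvbuf` before accepting more. If the scheduling call does not enforce that bound itself, a slow reader's ingress queue can grow past the socket's receive buffer limit. I would either add a check along the lines of `if (atomic_read(&sk->sk_rmem_alloc) + size > READ_ONCE(sk->sk_rcvbuf))` before scheduling, or a comment stating where the limit is applied. The loop and the function begin outside the hunk.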
@@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
if (!ret) {
msg->sg.start = i;
- sk_psock_queue_msg(psock, tmp);
+ if (!sk_psock_queue_msg(psock, tmp))
+ atomic_sub(copied, &sk->sk_rmem_alloc);
sk_psock_data_ready(sk, psock);
} else {
sk_msg_free(sk, tmp);
@@ -441,7 +443,6 @@ more_data:
cork = true;
psock->cork = NULL;
}
- sk_msg_return(sk, msg, tosend);
release_sock(sk);
origsize = msg->sg.size;
@@ -453,8 +454,9 @@ more_data:
sock_put(sk_redir);
lock_sock(sk);
+ sk_mem_uncharge(sk, sent);
if (unlikely(ret < 0)) {
- int free = sk_msg_free_nocharge(sk, msg);
+ int free = sk_msg_free(sk, msg);
if (!cork)
*copied -= free;
@@ -468,7 +470,7 @@ more_data:
break;
case __SK_DROP:
default:
- sk_msg_free_partial(sk, msg, tosend);
+ sk_msg_free(sk, msg);
sk_msg_apply_bytes(psock, tosend);
*copied -= (tosend + delta);
return -EACCES;
@@ -484,11 +486,8 @@ more_data:
}
if (msg &&
msg->sg.data[msg->sg.start].page_link &&
- msg->sg.data[msg->sg.start].length) {
- if (eval == __SK_REDIRECT)
- sk_mem_charge(sk, tosend - sent);
+ msg->sg.data[msg->sg.start].length)
goto more_data;
- }
}
return ret;
}
@@ -496,7 +495,7 @@ more_data:
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_msg tmp, *msg_tx = NULL;
- int copied = 0, err = 0;
+ int copied = 0, err = 0, ret = 0;
struct sk_psock *psock;
long timeo;
int flags;
@@ -539,14 +538,14 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
copy = msg_tx->sg.size - osize;
}
- err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
+ ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
copy);
- if (err < 0) {
+ if (ret < 0) {
sk_msg_trim(sk, msg_tx, osize);
goto out_err;
}
- copied += copy;
+ copied += ret;
if (psock->cork_bytes) {
if (size > psock->cork_bytes)
psock->cork_bytes = 0;
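Review bot: On the failure branch `if (ret < 0) { sk_msg_trim(...); goto out_err; }` the error code now lives in `ret`, while `err` keeps whatever an earlier call left in it. Unless the code at `out_err` (outside this hunk) reads `ret`, a failed copy with nothing copied so far could return 0 or a stale code to the caller instead of the real error. Adding `err = ret;` before the `goto` would keep the previous error reporting.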
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5bdf13ac26ef..4811727b8a02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -7328,6 +7328,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
req->timeout))) {
reqsk_free(req);
+ dst_release(dst);
return 0;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index bb1fe1ba867a..7121d8573928 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -326,6 +326,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcptw->tw_last_oow_ack_time = 0;
tcptw->tw_tx_delay = tp->tcp_tx_delay;
tw->tw_txhash = sk->sk_txhash;
+ tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
+#endif
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5485a70b5fe5..0e5b9a654254 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -883,8 +883,10 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
unsigned int size;
if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
- opts->options |= OPTION_MPTCP;
- remaining -= size;
+ if (remaining >= size) {
+ opts->options |= OPTION_MPTCP;
+ remaining -= size;
+ }
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a01905d379f..e8953e88efef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1674,7 +1674,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
struct sk_buff_head *list = &sk->sk_receive_queue;
int rmem, err = -ENOMEM;
spinlock_t *busy = NULL;
- bool becomes_readable;
int size, rcvbuf;
/* Immediately drop when the receive queue is full.
@@ -1715,19 +1714,12 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
*/
sock_skb_set_dropcount(sk, skb);
- becomes_readable = skb_queue_empty(list);
__skb_queue_tail(list, skb);
spin_unlock(&list->lock);
- if (!sock_flag(sk, SOCK_DEAD)) {
- if (becomes_readable ||
- sk->sk_data_ready != sock_def_readable ||
- READ_ONCE(sk->sk_peek_off) >= 0)
- INDIRECT_CALL_1(sk->sk_data_ready,
- sock_def_readable, sk);
- else
- sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
- }
+ if (!sock_flag(sk, SOCK_DEAD))
+ INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);
+
busylock_release(busy);
return 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c489a1e6aec9..0e765466d7f7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4821,7 +4821,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
ifm->ifa_prefixlen, extack);
}
-static int modify_prefix_route(struct inet6_ifaddr *ifp,
+static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags,
bool modify_peer)
{
@@ -4845,7 +4845,9 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ifp->prefix_len,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
- } else {
+ return 0;
+ }
+ if (f6i != net->ipv6.fib6_null_entry) {
table = f6i->fib6_table;
spin_lock_bh(&table->tb6_lock);
@@ -4858,9 +4860,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
}
spin_unlock_bh(&table->tb6_lock);
-
- fib6_info_release(f6i);
}
+ fib6_info_release(f6i);
return 0;
}
@@ -4939,7 +4940,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
int rc = -ENOENT;
if (had_prefixroute)
- rc = modify_prefix_route(ifp, expires, flags, false);
+ rc = modify_prefix_route(net, ifp, expires, flags, false);
/* prefix route could have been deleted; if so restore it */
if (rc == -ENOENT) {
@@ -4949,7 +4950,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
}
if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
- rc = modify_prefix_route(ifp, expires, flags, true);
+ rc = modify_prefix_route(net, ifp, expires, flags, true);
if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 7646e401c630..1d41b2ab4884 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -195,6 +195,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
},
};
+static DEFINE_MUTEX(ila_mutex);
+
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -202,16 +204,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
int err = 0, order;
- if (!ilan->xlat.hooks_registered) {
+ if (!READ_ONCE(ilan->xlat.hooks_registered)) {
/* We defer registering net hooks in the namespace until the
* first mapping is added.
*/
- err = nf_register_net_hooks(net, ila_nf_hook_ops,
- ARRAY_SIZE(ila_nf_hook_ops));
+ mutex_lock(&ila_mutex);
+ if (!ilan->xlat.hooks_registered) {
+ err = nf_register_net_hooks(net, ila_nf_hook_ops,
+ ARRAY_SIZE(ila_nf_hook_ops));
+ if (!err)
+ WRITE_ONCE(ilan->xlat.hooks_registered, true);
+ }
+ mutex_unlock(&ila_mutex);
if (err)
return err;
-
- ilan->xlat.hooks_registered = true;
}
ila = kzalloc(sizeof(*ila), GFP_KERNEL);
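Review bot: `ila_mutex` is declared without a comment and its name does not say what it guards, while the field it serialises, `ilan->xlat.hooks_registered`, is per-namespace and the mutex is file-global. A line such as `/* Serialises first-time nf hook registration; protects ilan->xlat.hooks_registered */` above the `DEFINE_MUTEX` would document the pairing with the lockless `READ_ONCE()` fast path. If hooks_registered is also read on a teardown path (not visible here), that reader should use `READ_ONCE()` as well. ila_add_mapping() continues past the hunk.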
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7f1902ac3586..578ff1336afe 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -110,7 +110,7 @@ static void ipmr_expire_process(struct timer_list *t);
static bool ip6mr_can_free_table(struct net *net)
{
- return !check_net(net) || !net->ipv6.mr6_rules_ops;
+ return !check_net(net) || !net_initialized(net);
}
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 63d7681c929f..67ff16c04718 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2780,10 +2780,10 @@ static void ip6_negative_advice(struct sock *sk,
if (rt->rt6i_flags & RTF_CACHE) {
rcu_read_lock();
if (rt6_check_expired(rt)) {
- /* counteract the dst_release() in sk_dst_reset() */
- dst_hold(dst);
+ /* rt/dst can not be destroyed yet,
+ * because of rcu_read_lock()
+ */
sk_dst_reset(sk);
-
rt6_remove_exception_rt(rt);
}
rcu_read_unlock();
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 51bccfb00a9c..61b0159b2fbe 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, llc_len)))
return 0;
- skb->transport_header += llc_len;
skb_pull(skb, llc_len);
+ skb_reset_transport_header(skb);
if (skb->protocol == htons(ETH_P_802_2)) {
__be16 pdulen;
s32 data_size;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 61a824ec33da..d3fc158ccaf6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1088,13 +1088,13 @@ ieee80211_copy_mbssid_beacon(u8 *pos, struct cfg80211_mbssid_elems *dst,
{
int i, offset = 0;
+ dst->cnt = src->cnt;
for (i = 0; i < src->cnt; i++) {
memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
dst->elem[i].len = src->elem[i].len;
dst->elem[i].data = pos + offset;
offset += dst->elem[i].len;
}
- dst->cnt = src->cnt;
return offset;
}
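Review bot: Moving `dst->cnt = src->cnt;` above the loop comes with no explanation, so a later cleanup could move it back. If the reason is that `dst->elem[]` accesses are bounds-checked against `cnt` (for example a `__counted_by(cnt)` annotation on the array, not visible here), a comment should say so: `/* set cnt first: elem[] accesses are checked against it */`. The loop also writes `dst->elem[i]` for every `i < src->cnt` with no visible check that `dst` was sized for that many entries. The signature and the callers are outside the hunk, so that is worth confirming.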
@@ -1935,6 +1935,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
params->eht_capa_len,
link_sta);
+ ieee80211_sta_init_nss(link_sta);
+
if (params->opmode_notif_used) {
/* returned value is only needed for rc update, but the
* rc isn't initialized here yet, so ignore it
@@ -1944,8 +1946,6 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}
- ieee80211_sta_init_nss(link_sta);
-
return 0;
}
@@ -3726,13 +3726,12 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
}
EXPORT_SYMBOL(ieee80211_csa_finish);
-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx)
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- sdata->csa_blocked_queues = block_tx;
sdata_info(sdata, "channel switch failed, disconnecting\n");
wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
}
@@ -5046,10 +5045,16 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ u16 new_links = wdev->valid_links & ~BIT(link_id);
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
+ /* During the link teardown process, certain functions require the
+ * link_id to remain in the valid_links bitmap. Therefore, instead
+ * of removing the link_id from the bitmap, pass a masked value to
+ * simulate as if link_id does not exist anymore.
+ */
+ ieee80211_vif_set_links(sdata, new_links, 0);
}
static int
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index a00096dd787b..9f0db39b28ff 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1104,8 +1104,6 @@ struct ieee80211_sub_if_data {
unsigned long state;
- bool csa_blocked_queues;
-
char name[IFNAMSIZ];
struct ieee80211_fragment_cache frags;
@@ -2412,17 +2410,13 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
-
+unsigned int
+ieee80211_get_vif_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
unsigned long queues,
enum queue_stop_reason reason,
bool refcounted);
-void ieee80211_stop_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason);
-void ieee80211_wake_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason);
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
unsigned long queues,
enum queue_stop_reason reason,
@@ -2433,6 +2427,43 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason,
bool refcounted);
+static inline void
+ieee80211_stop_vif_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_stop_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, true);
+}
+
+static inline void
+ieee80211_wake_vif_queues(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_wake_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, true);
+}
+static inline void
+ieee80211_stop_vif_queues_norefcount(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_stop_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, false);
+}
+static inline void
+ieee80211_wake_vif_queues_norefcount(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum queue_stop_reason reason)
+{
+ ieee80211_wake_queues_by_reason(&local->hw,
+ ieee80211_get_vif_queues(local, sdata),
+ reason, false);
+}
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
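Review bot: The two `_norefcount` helpers are added without any comment saying when passing `false` for the last argument is the right choice, and unlike the pair above them they are not separated by a blank line. A caller choosing between ieee80211_stop_vif_queues() and ieee80211_stop_vif_queues_norefcount() has only the name to go on. I would add above the first one `/* non-refcounted: repeated stops collapse, a single wake clears the reason */`, assuming that is what the `refcounted` argument of ieee80211_stop_queues_by_reason() means; its definition is outside this hunk. Mixing the refcounted and non-refcounted variants for the same stop reason looks easy to get wrong, so the comment should say not to.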
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index a8fbedd530f4..806dffa48ef9 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -278,13 +278,8 @@ static int _ieee80211_change_mac(struct ieee80211_sub_if_data *sdata,
ret = eth_mac_addr(sdata->dev, sa);
if (ret == 0) {
- if (check_dup) {
- memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
- ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr);
- } else {
- memset(sdata->vif.addr, 0, ETH_ALEN);
- memset(sdata->vif.bss_conf.addr, 0, ETH_ALEN);
- }
+ memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
+ ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr);
}
/* Regardless of eth_mac_addr() return we still want to add the
@@ -1323,6 +1318,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
}
}
+ sdata->vif.addr_valid = sdata->vif.type != NL80211_IFTYPE_MONITOR ||
+ (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE);
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
/* no need to tell driver, but set carrier and chanctx */
@@ -2374,18 +2371,14 @@ void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata)
if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA))
return;
- ieee80211_stop_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = true;
+ ieee80211_stop_vif_queues_norefcount(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
}
void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- if (sdata->csa_blocked_queues) {
- ieee80211_wake_vif_queues(local, sdata,
- IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_queues = false;
- }
+ ieee80211_wake_vif_queues_norefcount(local, sdata,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index cb5f16366b9c..974081324aa4 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1157,14 +1157,14 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- unsigned long bits = changed;
+ unsigned long bits[] = { BITMAP_FROM_U64(changed) };
u32 bit;
- if (!bits)
+ if (!changed)
return;
/* if we race with running work, worst case this work becomes a noop */
- for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
+ for_each_set_bit(bit, bits, sizeof(changed) * BITS_PER_BYTE)
set_bit(bit, ifmsh->mbss_changed);
set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
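Review bot: The new `unsigned long bits[] = { BITMAP_FROM_U64(changed) };` needs a one-line comment, because the reason for the array is not visible in the code. `for_each_set_bit()` is asked to scan `sizeof(changed) * BITS_PER_BYTE` bits, i.e. 64, and a single `unsigned long` only holds that many on a 64-bit build, so the old form read past the variable on 32-bit. Suggested comment: `/* a u64 does not fit one unsigned long on 32-bit; scan it as a bitmap */`. The function's closing brace is outside this hunk.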
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 480b664151c9..61c318f5239f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2638,8 +2638,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
*/
link->conf->csa_active = true;
link->u.mgd.csa.blocked_tx = csa_ie.mode;
- sdata->csa_blocked_queues =
- csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
diff --git a/net/mac80211/tests/elems.c b/net/mac80211/tests/elems.c
index a413ba29f759..a53c55a879a8 100644
--- a/net/mac80211/tests/elems.c
+++ b/net/mac80211/tests/elems.c
@@ -7,7 +7,7 @@
#include <kunit/test.h>
#include "../ieee80211_i.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void mle_defrag(struct kunit *test)
{
diff --git a/net/mac80211/tests/mfp.c b/net/mac80211/tests/mfp.c
index a8dc1601da60..58e675e0ed91 100644
--- a/net/mac80211/tests/mfp.c
+++ b/net/mac80211/tests/mfp.c
@@ -9,7 +9,7 @@
#include "../ieee80211_i.h"
#include "../sta_info.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static const struct mfp_test_case {
const char *desc;
diff --git a/net/mac80211/tests/tpe.c b/net/mac80211/tests/tpe.c
index dd63303a2985..c73b6c66bd5a 100644
--- a/net/mac80211/tests/tpe.c
+++ b/net/mac80211/tests/tpe.c
@@ -7,7 +7,7 @@
#include <kunit/test.h>
#include "../ieee80211_i.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static struct ieee80211_channel chan6g_1 = {
.band = NL80211_BAND_6GHZ,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a4e1301cc999..dc0b74443c8d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -657,7 +657,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_wake_queues);
-static unsigned int
+unsigned int
ieee80211_get_vif_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
@@ -669,7 +669,8 @@ ieee80211_get_vif_queues(struct ieee80211_local *local,
queues = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- queues |= BIT(sdata->vif.hw_queue[ac]);
+ if (sdata->vif.hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ queues |= BIT(sdata->vif.hw_queue[ac]);
if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE)
queues |= BIT(sdata->vif.cab_queue);
} else {
@@ -724,24 +725,6 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
__ieee80211_flush_queues(local, sdata, 0, drop);
}
-void ieee80211_stop_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason)
-{
- ieee80211_stop_queues_by_reason(&local->hw,
- ieee80211_get_vif_queues(local, sdata),
- reason, true);
-}
-
-void ieee80211_wake_vif_queues(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- enum queue_stop_reason reason)
-{
- ieee80211_wake_queues_by_reason(&local->hw,
- ieee80211_get_vif_queues(local, sdata),
- reason, true);
-}
-
static void __iterate_interfaces(struct ieee80211_local *local,
u32 iter_flags,
void (*iterator)(void *data, u8 *mac,
@@ -1843,6 +1826,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
else
WARN(1, "Hardware became unavailable during restart.\n");
+ ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+ false);
ieee80211_handle_reconfig_failure(local);
return res;
}
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 597e9cf5aa64..3f2bd65ff5e3 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -374,8 +374,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
msk = NULL;
rc = -EINVAL;
- /* we may be receiving a locally-routed packet; drop source sk
- * accounting
+ /* We may be receiving a locally-routed packet; drop source sk
+ * accounting.
+ *
+ * From here, we will either queue the skb - either to a frag_queue, or
+ * to a receiving socket. When that succeeds, we clear the skb pointer;
+ * a non-NULL skb on exit will be otherwise unowned, and hence
+ * kfree_skb()-ed.
*/
skb_orphan(skb);
@@ -434,7 +439,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* pending key.
*/
if (flags & MCTP_HDR_FLAG_EOM) {
- sock_queue_rcv_skb(&msk->sk, skb);
+ rc = sock_queue_rcv_skb(&msk->sk, skb);
+ if (!rc)
+ skb = NULL;
if (key) {
/* we've hit a pending reassembly; not much we
* can do but drop it
@@ -443,7 +450,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
- rc = 0;
goto out_unlock;
}
@@ -470,8 +476,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* this function.
*/
rc = mctp_key_add(key, msk);
- if (!rc)
+ if (!rc) {
trace_mctp_key_acquire(key);
+ skb = NULL;
+ }
/* we don't need to release key->lock on exit, so
* clean up here and suppress the unlock via
@@ -489,6 +497,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
+ if (!rc)
+ skb = NULL;
}
}
@@ -503,12 +513,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
else
rc = mctp_frag_queue(key, skb);
+ if (rc)
+ goto out_unlock;
+
+ /* we've queued; the queue owns the skb now */
+ skb = NULL;
+
/* end of message? deliver to socket, and we're done with
* the reassembly/response key
*/
- if (!rc && flags & MCTP_HDR_FLAG_EOM) {
- sock_queue_rcv_skb(key->sk, key->reasm_head);
- key->reasm_head = NULL;
+ if (flags & MCTP_HDR_FLAG_EOM) {
+ rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
+ if (!rc)
+ key->reasm_head = NULL;
__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
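Review bot: In the EOM branch, a failing `sock_queue_rcv_skb(key->sk, key->reasm_head)` now leaves `key->reasm_head` set while the key is still passed to `__mctp_key_done_in()` and dropped, and nothing in this hunk shows who frees the reassembled chain. Presumably the key's release path does, but that is the step a reader has to verify to be sure the failure path neither leaks nor double-frees. I would state it at the call site, e.g. `/* on failure reasm_head stays with the key and is freed when the key is released */`, after confirming it against the key destructor. Note also that `skb` has already been set to NULL above, so the unconditional `kfree_skb(skb)` at `out:` correctly leaves the queued fragment alone here.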
@@ -527,8 +544,7 @@ out_unlock:
if (any_key)
mctp_key_unref(any_key);
out:
- if (rc)
- kfree_skb(skb);
+ kfree_skb(skb);
return rc;
}
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 8551dab1d1e6..17165b86ce22 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -837,6 +837,90 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
mctp_test_route_input_multiple_nets_key_fini(test, &t2);
}
+/* Input route to socket, using a single-packet message, where sock delivery
+ * fails. Ensure we're handling the failure appropriately.
+ */
+static void mctp_test_route_input_sk_fail_single(struct kunit *test)
+{
+ const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ struct sk_buff *skb;
+ int rc;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ /* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
+ * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
+ */
+ lock_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
+ release_sock(sock->sk);
+
+ skb = mctp_test_create_skb(&hdr, 10);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+ skb_get(skb);
+
+ mctp_test_skb_set_dev(skb, dev);
+
+ /* do route input, which should fail */
+ rc = mctp_route_input(&rt->rt, skb);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ /* we should hold the only reference to skb */
+ KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
+ kfree_skb(skb);
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* Input route to socket, using a fragmented message, where sock delivery fails.
+ */
+static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
+{
+ const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skbs[2];
+ struct socket *sock;
+ unsigned int i;
+ int rc;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ lock_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
+ release_sock(sock->sk);
+
+ for (i = 0; i < ARRAY_SIZE(skbs); i++) {
+ skbs[i] = mctp_test_create_skb(&hdrs[i], 10);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]);
+ skb_get(skbs[i]);
+
+ mctp_test_skb_set_dev(skbs[i], dev);
+ }
+
+ /* first route input should succeed, we're only queueing to the
+ * frag list
+ */
+ rc = mctp_route_input(&rt->rt, skbs[0]);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+
+ /* final route input should fail to deliver to the socket */
+ rc = mctp_route_input(&rt->rt, skbs[1]);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ /* we should hold the only reference to both skbs */
+ KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1);
+ kfree_skb(skbs[0]);
+
+ KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
+ kfree_skb(skbs[1]);
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
static void mctp_test_flow_init(struct kunit *test,
@@ -1053,6 +1137,8 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
+ KUNIT_CASE(mctp_test_route_input_sk_fail_single),
+ KUNIT_CASE(mctp_test_route_input_sk_fail_frag),
KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
KUNIT_CASE(mctp_test_packet_flow),
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 1603b3702e22..a62bc874bf1e 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -667,8 +667,15 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
&echo, &drop_other_suboptions))
return false;
+ /*
+ * Later on, mptcp_write_options() will enforce mutually exclusion with
+ * DSS, bail out if such option is set and we can't drop it.
+ */
if (drop_other_suboptions)
remaining += opt_size;
+ else if (opts->suboptions & OPTION_MPTCP_DSS)
+ return false;
+
len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
if (remaining < len)
return false;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 08a72242428c..1b2e7cbb577f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -136,6 +136,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
int delta;
if (MPTCP_SKB_CB(from)->offset ||
+ ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;
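Review bot: The new bound `(to->len + from->len) > (sk->sk_rcvbuf >> 3)` in mptcp_try_coalesce() introduces a magic ratio with no explanation. A reader cannot tell whether one eighth of the receive buffer is a measured value or an arbitrary cap, nor what goes wrong when a coalesced skb grows past it. I would add a comment such as `/* cap coalesced skb size at 1/8 of rcvbuf so one skb cannot pin most of the buffer */`, with the wording adjusted to the actual motivation. The comparison also mixes the unsigned `len` sum with the signed `sk_rcvbuf`, which is only safe while `sk_rcvbuf` is non-negative.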
@@ -528,13 +529,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}
-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
bool slow;
slow = lock_sock_fast(ssk);
if (tcp_can_send_ack(ssk))
- tcp_cleanup_rbuf(ssk, 1);
+ tcp_cleanup_rbuf(ssk, copied);
unlock_sock_fast(ssk, slow);
}
@@ -551,7 +552,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}
-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
int old_space = READ_ONCE(msk->old_wspace);
struct mptcp_subflow_context *subflow;
@@ -559,14 +560,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
int space = __mptcp_space(sk);
bool cleanup, rx_empty;
- cleanup = (space > 0) && (space >= (old_space << 1));
- rx_empty = !__mptcp_rmem(sk);
+ cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
+ rx_empty = !__mptcp_rmem(sk) && copied;
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
- mptcp_subflow_cleanup_rbuf(ssk);
+ mptcp_subflow_cleanup_rbuf(ssk, copied);
}
}
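Review bot: `copied` is an `int` used as a truth value in `&& copied` on both the `cleanup` and `rx_empty` lines, so a negative value would count as "data was copied". The callers visible in this diff appear to pass a non-negative count, but the signature does not enforce that. Writing `&& copied > 0` states the intent and keeps the helper correct if an error code is ever passed through. The start of mptcp_cleanup_rbuf() is in the previous hunk.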
@@ -1939,6 +1940,8 @@ do_error:
goto out;
}
+static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
+
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len, int flags,
@@ -1992,6 +1995,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
break;
}
+ mptcp_rcv_space_adjust(msk, copied);
return copied;
}
@@ -2217,9 +2221,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
copied += bytes_read;
- /* be sure to advertise window change */
- mptcp_cleanup_rbuf(msk);
-
if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
continue;
@@ -2268,7 +2269,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
pr_debug("block timeout %ld\n", timeo);
- mptcp_rcv_space_adjust(msk, copied);
+ mptcp_cleanup_rbuf(msk, copied);
err = sk_wait_data(sk, &timeo, NULL);
if (err < 0) {
err = copied ? : err;
@@ -2276,7 +2277,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- mptcp_rcv_space_adjust(msk, copied);
+ mptcp_cleanup_rbuf(msk, copied);
out_err:
if (cmsg_flags && copied >= 0) {
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 61431690cbd5..cc20e6d56807 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -104,14 +104,19 @@ find_set_type(const char *name, u8 family, u8 revision)
static bool
load_settype(const char *name)
{
+ if (!try_module_get(THIS_MODULE))
+ return false;
+
nfnl_unlock(NFNL_SUBSYS_IPSET);
pr_debug("try to load ip_set_%s\n", name);
if (request_module("ip_set_%s", name) < 0) {
pr_warn("Can't find ip_set type %s\n", name);
nfnl_lock(NFNL_SUBSYS_IPSET);
+ module_put(THIS_MODULE);
return false;
}
nfnl_lock(NFNL_SUBSYS_IPSET);
+ module_put(THIS_MODULE);
return true;
}
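Review bot: The `try_module_get(THIS_MODULE)` added at the top of load_settype() has no comment, and its purpose only becomes clear once one notices that `nfnl_unlock()` drops the subsystem lock around `request_module()`. I would add `/* pin ip_set while the nfnl lock is dropped, so it cannot be unloaded under us */` above it. The `nfnl_lock(); module_put();` pair is now duplicated on the failure and success paths. A single exit (`bool ok = request_module(...) >= 0;` then relock, put, and `return ok;`) would keep the two from drifting apart.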
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index bfae7066936b..db794fe1300e 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -611,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
return true;
}
+static struct lock_class_key list_set_lockdep_key;
+
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
@@ -627,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE;
+ lockdep_set_class(&set->lock, &list_set_lockdep_key);
set->variant = &set_variant;
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
__alignof__(struct set_elem));
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 98d7dbe3d787..c0289f83f96d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1495,8 +1495,8 @@ int __init ip_vs_conn_init(void)
max_avail -= 2; /* ~4 in hash row */
max_avail -= 1; /* IPVS up to 1/2 of mem */
max_avail -= order_base_2(sizeof(struct ip_vs_conn));
- max = clamp(max, min, max_avail);
- ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
+ max = clamp(max_avail, min, max);
+ ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f100da4ba3bc..a9fd1d3fc2cb 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -340,7 +340,7 @@ void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
int __init ip_vs_protocol_init(void)
{
- char protocols[64];
+ char protocols[64] = { 0 };
#define REGISTER_PROTOCOL(p) \
do { \
register_ip_vs_protocol(p); \
@@ -348,8 +348,6 @@ int __init ip_vs_protocol_init(void)
strcat(protocols, (p)->name); \
} while (0)
- protocols[0] = '\0';
- protocols[2] = '\0';
#ifdef CONFIG_IP_VS_PROTO_TCP
REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
#endif
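Review bot: With the buffer now zero-initialised, the remaining weak spot in REGISTER_PROTOCOL is the unbounded `strcat(protocols, (p)->name);` into `char protocols[64]`. The names are presumably short compile-time constants, so this is not reachable from outside today, but nothing stops the list from outgrowing 64 bytes as protocols are added. A bounded append such as `strlcat(protocols, (p)->name, sizeof(protocols));` would make the limit explicit. The macro body starts in the previous hunk, so any other append in it should get the same treatment.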
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 21b6f7410a1f..0b9f1e8dfe49 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1596,7 +1596,6 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
INIT_LIST_HEAD(&table->sets);
INIT_LIST_HEAD(&table->objects);
INIT_LIST_HEAD(&table->flowtables);
- write_pnet(&table->net, net);
table->family = family;
table->flags = flags;
table->handle = ++nft_net->table_handle;
@@ -3987,8 +3986,11 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
kfree(rule);
}
+/* can only be used if rule is no longer visible to dumps */
static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
{
+ lockdep_commit_lock_is_held(ctx->net);
+
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
nf_tables_rule_destroy(ctx, rule);
}
@@ -5757,6 +5759,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
+ lockdep_commit_lock_is_held(ctx->net);
+
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
nft_set_trans_unbind(ctx, set);
@@ -11695,19 +11699,6 @@ static void __nft_release_basechain_now(struct nft_ctx *ctx)
nf_tables_chain_destroy(ctx->chain);
}
-static void nft_release_basechain_rcu(struct rcu_head *head)
-{
- struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
- struct nft_ctx ctx = {
- .family = chain->table->family,
- .chain = chain,
- .net = read_pnet(&chain->table->net),
- };
-
- __nft_release_basechain_now(&ctx);
- put_net(ctx.net);
-}
-
int __nft_release_basechain(struct nft_ctx *ctx)
{
struct nft_rule *rule;
@@ -11722,11 +11713,18 @@ int __nft_release_basechain(struct nft_ctx *ctx)
nft_chain_del(ctx->chain);
nft_use_dec(&ctx->table->use);
- if (maybe_get_net(ctx->net))
- call_rcu(&ctx->chain->rcu_head, nft_release_basechain_rcu);
- else
+ if (!maybe_get_net(ctx->net)) {
__nft_release_basechain_now(ctx);
+ return 0;
+ }
+
+ /* wait for ruleset dumps to complete. Owning chain is no longer in
+ * lists, so new dumps can't find any of these rules anymore.
+ */
+ synchronize_rcu();
+ __nft_release_basechain_now(ctx);
+ put_net(ctx->net);
return 0;
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);
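Review bot: The `!maybe_get_net(ctx->net)` branch of __nft_release_basechain() destroys the chain immediately, while the other branch waits in `synchronize_rcu()` for ruleset dumps first, and the hunk does not say why the early branch may skip the grace period. Presumably no dump can run once the namespace refcount has dropped to zero, but that is the invariant keeping the rules from being freed under a reader, so it should be written down, e.g. `/* netns is going away: no dump can be walking these rules */`. `synchronize_rcu()` also sleeps, so the function comment should state that callers must be in process context. The start of the function is outside this hunk.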
diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
index 928312d01eb1..817ab978d24a 100644
--- a/net/netfilter/nft_inner.c
+++ b/net/netfilter/nft_inner.c
@@ -210,35 +210,66 @@ static int nft_inner_parse(const struct nft_inner *priv,
struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *tun_ctx)
{
- struct nft_inner_tun_ctx ctx = {};
u32 off = pkt->inneroff;
if (priv->flags & NFT_INNER_HDRSIZE &&
- nft_inner_parse_tunhdr(priv, pkt, &ctx, &off) < 0)
+ nft_inner_parse_tunhdr(priv, pkt, tun_ctx, &off) < 0)
return -1;
if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
- if (nft_inner_parse_l2l3(priv, pkt, &ctx, off) < 0)
+ if (nft_inner_parse_l2l3(priv, pkt, tun_ctx, off) < 0)
return -1;
} else if (priv->flags & NFT_INNER_TH) {
- ctx.inner_thoff = off;
- ctx.flags |= NFT_PAYLOAD_CTX_INNER_TH;
+ tun_ctx->inner_thoff = off;
+ tun_ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
}
- *tun_ctx = ctx;
tun_ctx->type = priv->type;
+ tun_ctx->cookie = (unsigned long)pkt->skb;
pkt->flags |= NFT_PKTINFO_INNER_FULL;
return 0;
}
+static bool nft_inner_restore_tun_ctx(const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *tun_ctx)
+{
+ struct nft_inner_tun_ctx *this_cpu_tun_ctx;
+
+ local_bh_disable();
+ this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+ if (this_cpu_tun_ctx->cookie != (unsigned long)pkt->skb) {
+ local_bh_enable();
+ return false;
+ }
+ *tun_ctx = *this_cpu_tun_ctx;
+ local_bh_enable();
+
+ return true;
+}
+
+static void nft_inner_save_tun_ctx(const struct nft_pktinfo *pkt,
+ const struct nft_inner_tun_ctx *tun_ctx)
+{
+ struct nft_inner_tun_ctx *this_cpu_tun_ctx;
+
+ local_bh_disable();
+ this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+ if (this_cpu_tun_ctx->cookie != tun_ctx->cookie)
+ *this_cpu_tun_ctx = *tun_ctx;
+ local_bh_enable();
+}
+
static bool nft_inner_parse_needed(const struct nft_inner *priv,
const struct nft_pktinfo *pkt,
- const struct nft_inner_tun_ctx *tun_ctx)
+ struct nft_inner_tun_ctx *tun_ctx)
{
if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
return true;
+ if (!nft_inner_restore_tun_ctx(pkt, tun_ctx))
+ return true;
+
if (priv->type != tun_ctx->type)
return true;
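Review bot: nft_inner_parse_needed() now fills `*tun_ctx` through nft_inner_restore_tun_ctx(), so a function named like a predicate has a side effect its caller depends on; a short comment above it should say so. The `cookie` is the raw `pkt->skb` address, and matching on it is only sound because the `NFT_PKTINFO_INNER_FULL` test runs first, so the per-cpu copy is consulted only for a packet already parsed in this evaluation. I would document that next to the cookie comparison, e.g. `/* cookie is the skb address; only trusted after NFT_PKTINFO_INNER_FULL was set for this pkt */`. Without that, the pointer comparison looks exposed to address reuse by a later skb. The rest of nft_inner_parse_needed() is in the next hunk.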
@@ -248,27 +279,29 @@ static bool nft_inner_parse_needed(const struct nft_inner *priv,
static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_inner_tun_ctx *tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
const struct nft_inner *priv = nft_expr_priv(expr);
+ struct nft_inner_tun_ctx tun_ctx = {};
if (nft_payload_inner_offset(pkt) < 0)
goto err;
- if (nft_inner_parse_needed(priv, pkt, tun_ctx) &&
- nft_inner_parse(priv, (struct nft_pktinfo *)pkt, tun_ctx) < 0)
+ if (nft_inner_parse_needed(priv, pkt, &tun_ctx) &&
+ nft_inner_parse(priv, (struct nft_pktinfo *)pkt, &tun_ctx) < 0)
goto err;
switch (priv->expr_type) {
case NFT_INNER_EXPR_PAYLOAD:
- nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+ nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
break;
case NFT_INNER_EXPR_META:
- nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+ nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
break;
default:
WARN_ON_ONCE(1);
goto err;
}
+ nft_inner_save_tun_ctx(pkt, &tun_ctx);
+
return;
err:
regs->verdict.code = NFT_BREAK;
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 65bd291318f2..8bfac4185ac7 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -24,11 +24,13 @@
struct nft_rhash {
struct rhashtable ht;
struct delayed_work gc_work;
+ u32 wq_gc_seq;
};
struct nft_rhash_elem {
struct nft_elem_priv priv;
struct rhash_head node;
+ u32 wq_gc_seq;
struct nft_set_ext ext;
};
@@ -338,6 +340,10 @@ static void nft_rhash_gc(struct work_struct *work)
if (!gc)
goto done;
+ /* Elements never collected use a zero gc worker sequence number. */
+ if (unlikely(++priv->wq_gc_seq == 0))
+ priv->wq_gc_seq++;
+
rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
@@ -355,6 +361,14 @@ static void nft_rhash_gc(struct work_struct *work)
goto try_later;
}
+ /* rhashtable walk is unstable, already seen in this gc run?
+ * Then, skip this element. In case of (unlikely) sequence
+ * wraparound and stale element wq_gc_seq, next gc run will
+ * just find this expired element.
+ */
+ if (he->wq_gc_seq == priv->wq_gc_seq)
+ continue;
+
if (nft_set_elem_is_dead(&he->ext))
goto dead_elem;
@@ -371,6 +385,8 @@ dead_elem:
if (!gc)
goto try_later;
+ /* annotate gc sequence for this attempt. */
+ he->wq_gc_seq = priv->wq_gc_seq;
nft_trans_gc_elem_add(gc, he);
}
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index f5da0c1775f2..35d0409b0095 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -68,7 +68,7 @@ static noinline int nft_socket_cgroup_subtree_level(void)
cgroup_put(cgrp);
- if (WARN_ON_ONCE(level > 255))
+ if (level > 255)
return -ERANGE;
if (WARN_ON_ONCE(level < 0))
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 85f017e37cfc..9f54819eb52c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -407,21 +407,23 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
mutex_lock(&list_mutex);
- if (--info->timer->refcnt == 0) {
- pr_debug("deleting timer %s\n", info->label);
-
- list_del(&info->timer->entry);
- timer_shutdown_sync(&info->timer->timer);
- cancel_work_sync(&info->timer->work);
- sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
- kfree(info->timer->attr.attr.name);
- kfree(info->timer);
- } else {
+ if (--info->timer->refcnt > 0) {
pr_debug("decreased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
+ mutex_unlock(&list_mutex);
+ return;
}
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
mutex_unlock(&list_mutex);
+
+ timer_shutdown_sync(&info->timer->timer);
+ cancel_work_sync(&info->timer->work);
+ sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
}
static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
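Review bot: The teardown sequence after `mutex_unlock(&list_mutex)` is now written out twice, here and in idletimer_tg_destroy_v1() in the next hunk, differing only in the `XT_IDLETIMER_ALARM` case. It also runs without the mutex, and no comment explains why that is safe. The safety appears to rest on `list_del()` having removed the timer while the lock was held, so that no later lookup can find it; that deserves a line such as `/* timer is off the list: no new users can find it, safe to sync-cancel without list_mutex */`. A small static helper taking the timer would remove the duplication and give the comment one home.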
@@ -432,25 +434,27 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
mutex_lock(&list_mutex);
- if (--info->timer->refcnt == 0) {
- pr_debug("deleting timer %s\n", info->label);
-
- list_del(&info->timer->entry);
- if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
- alarm_cancel(&info->timer->alarm);
- } else {
- timer_shutdown_sync(&info->timer->timer);
- }
- cancel_work_sync(&info->timer->work);
- sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
- kfree(info->timer->attr.attr.name);
- kfree(info->timer);
- } else {
+ if (--info->timer->refcnt > 0) {
pr_debug("decreased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
+ mutex_unlock(&list_mutex);
+ return;
}
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
mutex_unlock(&list_mutex);
+
+ if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+ alarm_cancel(&info->timer->alarm);
+ } else {
+ timer_shutdown_sync(&info->timer->timer);
+ }
+ cancel_work_sync(&info->timer->work);
+ sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
}
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index f7b0286d106a..8a80fd76fe45 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -96,7 +96,9 @@ static int led_tg_check(const struct xt_tgchk_param *par)
struct xt_led_info_internal *ledinternal;
int err;
- if (ledinfo->id[0] == '\0')
+ /* Bail out if empty string or not a string at all. */
+ if (ledinfo->id[0] == '\0' ||
+ !memchr(ledinfo->id, '\0', sizeof(ledinfo->id)))
return -EINVAL;
mutex_lock(&xt_led_mutex);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 2b5e246b8d9a..b94cb2ffbaf8 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
int ret;
struct sk_buff *skbn;
+ /*
+ * Reject malformed packets early. Check that it contains at least 2
+ * addresses and 1 byte more for Time-To-Live
+ */
+ if (skb->len < 2 * sizeof(ax25_address) + 1)
+ return 0;
nr_src = (ax25_address *)(skb->data + 0);
nr_dest = (ax25_address *)(skb->data + 7);
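Review bot: The new length check in nr_route_frame compares `skb->len`, which also counts paged data, while the two casts right below read directly from `skb->data`. If a non-linear skb can reach this function (not visible from the hunk), the bytes at `skb->data + 0` and `skb->data + 7` are not guaranteed to be in the linear area even when the check passes. `if (!pskb_may_pull(skb, 2 * sizeof(ax25_address) + 1)) return 0;` would cover both the length and the linearity, with the pointers still taken afterwards as they are now. The function body continues outside the hunk, so the meaning of `return 0` for the caller should be confirmed there.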
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 886c0dd47b66..2d73769d67f4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,10 +538,8 @@ static void *packet_current_frame(struct packet_sock *po,
return packet_lookup_frame(po, rb, rb->head, status);
}
-static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
+static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
{
- u8 *skb_orig_data = skb->data;
- int skb_orig_len = skb->len;
struct vlan_hdr vhdr, *vh;
unsigned int header_len;
@@ -562,33 +560,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
else
return 0;
- skb_push(skb, skb->data - skb_mac_header(skb));
- vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
- if (skb_orig_data != skb->data) {
- skb->data = skb_orig_data;
- skb->len = skb_orig_len;
- }
+ vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
+ sizeof(vhdr), &vhdr);
if (unlikely(!vh))
return 0;
return ntohs(vh->h_vlan_TCI);
}
-static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
+static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
{
__be16 proto = skb->protocol;
- if (unlikely(eth_type_vlan(proto))) {
- u8 *skb_orig_data = skb->data;
- int skb_orig_len = skb->len;
-
- skb_push(skb, skb->data - skb_mac_header(skb));
- proto = __vlan_get_protocol(skb, proto, NULL);
- if (skb_orig_data != skb->data) {
- skb->data = skb_orig_data;
- skb->len = skb_orig_len;
- }
- }
+ if (unlikely(eth_type_vlan(proto)))
+ proto = __vlan_get_protocol_offset(skb, proto,
+ skb_mac_offset(skb), NULL);
return proto;
}
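Review bot: The new `skb_header_pointer(skb, skb_mac_offset(skb) + header_len, ...)` call in vlan_get_tci passes an offset that is relative to `skb->data` and can be negative, and nothing in the code says so. The removed `skb_push(skb, skb->data - skb_mac_header(skb))` implies the MAC header lies before `skb->data` on this path, so a reader has to work out that the helper accepts such an offset. I would add `/* offset is relative to skb->data; negative once the MAC header has been pulled */` above the call, and the same remark applies to the `__vlan_get_protocol_offset()` call in vlan_get_protocol_dgram. Both rely on the MAC header being set, which is presumably checked in the part of vlan_get_tci that starts outside this hunk.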
diff --git a/net/psample/psample.c b/net/psample/psample.c
index a0ddae8a65f9..25f92ba0840c 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -393,7 +393,9 @@ void psample_sample_packet(struct psample_group *group,
nla_total_size_64bit(sizeof(u64)) + /* timestamp */
nla_total_size(sizeof(u16)) + /* protocol */
(md->user_cookie_len ?
- nla_total_size(md->user_cookie_len) : 0); /* user cookie */
+ nla_total_size(md->user_cookie_len) : 0) + /* user cookie */
+ (md->rate_as_probability ?
+ nla_total_size(0) : 0); /* rate as probability */
#ifdef CONFIG_INET
tun_info = skb_tunnel_info(skb);
@@ -498,8 +500,9 @@ void psample_sample_packet(struct psample_group *group,
md->user_cookie))
goto error;
- if (md->rate_as_probability)
- nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY);
+ if (md->rate_as_probability &&
+ nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY))
+ goto error;
genlmsg_end(nl_skb, data);
genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index a8e21060112f..9fa019e0dcad 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -203,7 +203,7 @@ MODULE_DEVICE_TABLE(of, rfkill_of_match);
static struct platform_driver rfkill_gpio_driver = {
.probe = rfkill_gpio_probe,
- .remove_new = rfkill_gpio_remove,
+ .remove = rfkill_gpio_remove,
.driver = {
.name = "rfkill_gpio",
.acpi_match_table = ACPI_PTR(rfkill_acpi_match),
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index e280c27cb9f9..1008ec8a464c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1369,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
int err;
md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
- memset(md, 0xff, sizeof(*md));
md->version = 1;
if (!depth)
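Review bot: With `memset(md, 0xff, sizeof(*md))` removed from fl_set_erspan_opt, every byte of `*md` that the later code does not set explicitly keeps whatever `key->enc_opts.data` already held, and this hunk does not show that buffer being zeroed. The following hunks only give defaults to `md->u.index`, `md->u.md2.dir` and the hwid, so the rest of the union and any padding depend on the caller's initialisation. Please confirm the buffer is zero-initialised before this point and record it next to the cast, e.g. `/* enc_opts.data is zeroed by the caller; only matched fields get a default below */`. The function starts and ends outside this hunk.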
@@ -1398,9 +1397,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
return -EINVAL;
}
+ memset(&md->u.index, 0xff, sizeof(md->u.index));
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
- memset(&md->u, 0x00, sizeof(md->u));
md->u.index = nla_get_be32(nla);
}
} else if (md->version == 2) {
@@ -1409,10 +1408,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
return -EINVAL;
}
+ md->u.md2.dir = 1;
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
md->u.md2.dir = nla_get_u8(nla);
}
+ set_hwid(&md->u.md2, 0xff);
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
set_hwid(&md->u.md2, nla_get_u8(nla));
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index f2f9b75008bb..8d8b2db4653c 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1525,7 +1525,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
b->backlogs[idx] -= len;
b->tin_backlog -= len;
sch->qstats.backlog -= len;
- qdisc_tree_reduce_backlog(sch, 1, len);
flow->dropped++;
b->tin_dropped++;
@@ -1536,6 +1535,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
__qdisc_drop(skb, to_free);
sch->q.qlen--;
+ qdisc_tree_reduce_backlog(sch, 1, len);
cake_heapify(q, 0);
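Review bot: The move of `qdisc_tree_reduce_backlog(sch, 1, len)` to after `sch->q.qlen--` in cake_drop is order-sensitive, but nothing marks it as such, so a later cleanup could easily reorder it again. Presumably the helper looks at the qdisc's current `qlen`, which is why the decrement has to come first. If that is the reason, a one-line comment such as `/* must run after qlen is updated */` above the call would protect it. The same applies to the `--sch->q.qlen` that is moved up in choke_drop_by_idx in net/sched/sch_choke.c.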
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 1e940ad0d2fa..59e7bdf5063e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
if (idx == q->tail)
choke_zap_tail_holes(q);
+ --sch->q.qlen;
qdisc_qstats_backlog_dec(sch, skb);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch, to_free);
- --sch->q.qlen;
}
struct choke_skb_cb {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fe6fed291a7b..71ec9986ed37 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -79,6 +79,8 @@ struct netem_sched_data {
struct sk_buff *t_head;
struct sk_buff *t_tail;
+ u32 t_len;
+
/* optional qdisc for classful handling (NULL at netem init) */
struct Qdisc *qdisc;
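Review bot: The new `t_len` field in struct netem_sched_data has no comment, although it now carries the limit check that used to use `sch->q.qlen`. From the other hunks in this file it is incremented in tfifo_enqueue, decremented when netem_dequeue erases the head of the tfifo, reset in tfifo_reset, and compared against `sch->limit` in netem_enqueue. I would document it in place: `u32 t_len; /* skbs currently held in the tfifo; checked against sch->limit */`. That also makes it clear that `sch->q.qlen` and `t_len` are expected to differ.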
@@ -383,6 +385,7 @@ static void tfifo_reset(struct Qdisc *sch)
rtnl_kfree_skbs(q->t_head, q->t_tail);
q->t_head = NULL;
q->t_tail = NULL;
+ q->t_len = 0;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
@@ -412,6 +415,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
rb_link_node(&nskb->rbnode, parent, p);
rb_insert_color(&nskb->rbnode, &q->t_root);
}
+ q->t_len++;
sch->q.qlen++;
}
@@ -518,7 +522,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<get_random_u32_below(8);
}
- if (unlikely(sch->q.qlen >= sch->limit)) {
+ if (unlikely(q->t_len >= sch->limit)) {
/* re-link segs, so that qdisc_drop_all() frees them all */
skb->next = segs;
qdisc_drop_all(skb, sch, to_free);
@@ -702,8 +706,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
tfifo_dequeue:
skb = __qdisc_dequeue_head(&sch->q);
if (skb) {
- qdisc_qstats_backlog_dec(sch, skb);
deliver:
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
}
@@ -719,8 +723,7 @@ deliver:
if (time_to_send <= now && q->slot.slot_next <= now) {
netem_erase_head(q, skb);
- sch->q.qlen--;
- qdisc_qstats_backlog_dec(sch, skb);
+ q->t_len--;
skb->next = NULL;
skb->prev = NULL;
/* skb->dev shares skb->rbnode area,
@@ -747,16 +750,21 @@ deliver:
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ sch->qstats.backlog -= pkt_len;
+ sch->q.qlen--;
}
goto tfifo_dequeue;
}
+ sch->q.qlen--;
goto deliver;
}
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb)
+ if (skb) {
+ sch->q.qlen--;
goto deliver;
+ }
}
qdisc_watchdog_schedule_ns(&q->watchdog,
@@ -766,8 +774,10 @@ deliver:
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb)
+ if (skb) {
+ sch->q.qlen--;
goto deliver;
+ }
}
return NULL;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index f1d09183ae63..dc26b22d53c7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+ unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
nb = 0;
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- len += segs->len;
+ seg_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_len = seg_len;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
} else {
nb++;
+ len += seg_len;
}
}
sch->q.qlen += nb;
- if (nb > 1)
+ sch->qstats.backlog += len;
+ if (nb > 0) {
qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
- consume_skb(skb);
- return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+ consume_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
}
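Review bot: The reason `segs->len` is copied into `seg_len` before `qdisc_enqueue()` in tbf_segment is not stated, and it is the part of this change most likely to be undone by a later tidy-up. Presumably the child qdisc may free or modify the segment, so it must not be read after the call. I would add `/* read the length first: segs must not be touched after qdisc_enqueue() */` above `seg_len = segs->len;`. As a style point, `seg_len` is only used inside the loop and could be declared there instead of at function scope.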
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index c45c192b7878..0b0794f164cf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+ (unsigned long)sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
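Review bot: The `(unsigned long)` cast on `sp->autoclose * HZ` in sctp_association_init only widens the multiplication where `unsigned long` is 64 bits. On a 32-bit build the product can presumably still wrap for a large autoclose value, though the type of `autoclose` and any clamp applied when it is set are not visible here. Please confirm that the value is bounded where it is stored, and say so at the assignment, e.g. `/* autoclose is in seconds and clamped when set; widen before scaling to jiffies */`.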
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 9d76e902fd77..6cc7b846cff1 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -383,6 +383,7 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol)
smc->limit_smc_hs = net->smc.limit_smc_hs;
smc->use_fallback = false; /* assume rdma capability first */
smc->fallback_rsn = 0;
+ smc_close_init(smc);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
@@ -1299,7 +1300,6 @@ static int smc_connect_rdma(struct smc_sock *smc,
goto connect_abort;
}
- smc_close_init(smc);
smc_rx_init(smc);
if (ini->first_contact_local) {
@@ -1435,7 +1435,6 @@ static int smc_connect_ism(struct smc_sock *smc,
goto connect_abort;
}
}
- smc_close_init(smc);
smc_rx_init(smc);
smc_tx_init(smc);
@@ -1901,6 +1900,7 @@ static void smc_listen_out(struct smc_sock *new_smc)
if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
atomic_dec(&lsmc->queued_smc_hs);
+ release_sock(newsmcsk); /* lock in smc_listen_work() */
if (lsmc->sk.sk_state == SMC_LISTEN) {
lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
smc_accept_enqueue(&lsmc->sk, newsmcsk);
@@ -2032,6 +2032,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
if (pclc->hdr.typev1 == SMC_TYPE_N)
return 0;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx)
+ return -EPROTO;
if (smc_clc_prfx_match(newclcsock, pclc_prfx))
return SMC_CLC_DECL_DIFFPREFIX;
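Review bot: The new `return -EPROTO` in smc_listen_prfx_check is a negative errno, while the neighbouring returns in this hunk are `0` and `SMC_CLC_DECL_DIFFPREFIX`. Whether the caller treats the two kinds of value the same way is not visible here, so a malformed proposal may take a different path than a prefix mismatch, for example whether a decline is sent to the peer. Please confirm the caller handles a negative return from this function. If the intent is a silent abort, say so with `/* malformed offset: abort without a decline */`.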
@@ -2145,6 +2147,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
pclc_smcd = smc_get_clc_msg_smcd(pclc);
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
+ if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
+ goto not_found;
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid) {
@@ -2221,7 +2225,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
int rc = 0;
/* check if ISM V1 is available */
- if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
+ if (!(ini->smcd_version & SMC_V1) ||
+ !smcd_indicated(ini->smc_type_v1) ||
+ !pclc_smcd)
goto not_found;
ini->is_smcd = true; /* prepare ISM check */
ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
@@ -2272,7 +2278,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
goto not_found;
smc_v2_ext = smc_get_clc_v2_ext(pclc);
- if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ if (!smc_v2_ext ||
+ !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
goto not_found;
/* prepare RDMA check */
@@ -2422,6 +2429,7 @@ static void smc_listen_work(struct work_struct *work)
u8 accept_version;
int rc = 0;
+ lock_sock(&new_smc->sk); /* release in smc_listen_out() */
if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
return smc_listen_out_err(new_smc);
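Review bot: The `lock_sock(&new_smc->sk)` added at the top of smc_listen_work is released in another function, so every exit from smc_listen_work has to end in smc_listen_out(). The first exit, right below the lock, returns through `smc_listen_out_err()`, whose body is not visible here, and the remaining exit paths lie outside this hunk. Please confirm they all reach the `release_sock(newsmcsk)` added to smc_listen_out(). The comment could make the rule explicit: `/* released in smc_listen_out(), which every exit path must reach */`.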
@@ -2479,7 +2487,6 @@ static void smc_listen_work(struct work_struct *work)
goto out_decl;
mutex_lock(&smc_server_lgr_pending);
- smc_close_init(new_smc);
smc_rx_init(new_smc);
smc_tx_init(new_smc);
@@ -2881,6 +2888,13 @@ __poll_t smc_poll(struct file *file, struct socket *sock,
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+
+ if (sk->sk_state != SMC_INIT) {
+ /* Race breaker the same way as tcp_poll(). */
+ smp_mb__after_atomic();
+ if (atomic_read(&smc->conn.sndbuf_space))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= EPOLLIN | EPOLLRDNORM;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 33fa787c28eb..521f5df80e10 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -352,8 +352,11 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
struct smc_clc_msg_hdr *hdr = &pclc->hdr;
struct smc_clc_v2_extension *v2_ext;
- v2_ext = smc_get_clc_v2_ext(pclc);
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx ||
+ pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
+ return false;
+
if (hdr->version == SMC_V1) {
if (hdr->typev1 == SMC_TYPE_N)
return false;
@@ -365,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
sizeof(struct smc_clc_msg_trail))
return false;
} else {
+ v2_ext = smc_get_clc_v2_ext(pclc);
+ if ((hdr->typev2 != SMC_TYPE_N &&
+ (!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) ||
+ (smcd_indicated(hdr->typev2) &&
+ v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES))
+ return false;
+
if (ntohs(hdr->length) !=
sizeof(*pclc) +
sizeof(struct smc_clc_msg_smcd) +
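Review bot: The second clause of the new condition in smc_clc_msg_prop_valid dereferences `v2_ext->hdr.ism_gid_cnt` without its own NULL test. It is safe only if `smcd_indicated(hdr->typev2)` can never be true when `hdr->typev2 == SMC_TYPE_N`, because the `!v2_ext` rejection sits inside the first clause, and the definition of smcd_indicated is not part of this hunk. Since this validates a message from the peer, I would make the guard explicit rather than implied: `(v2_ext && smcd_indicated(hdr->typev2) && v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES)`. Splitting the test into two `if` statements would also make it easier to read.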
@@ -764,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
SMC_CLC_RECV_BUF_LEN : datlen;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ if (len < recvlen) {
+ smc->sk.sk_err = EPROTO;
+ reason_code = -EPROTO;
+ goto out;
+ }
datlen -= len;
}
if (clcm->type == SMC_CLC_DECLINE) {
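Review bot: The new `if (len < recvlen)` test in smc_clc_wait_msg also catches a negative return from `sock_recvmsg()` and reports it as `EPROTO`, so the original socket error is lost. Handling the two cases separately keeps it: `smc->sk.sk_err = len < 0 ? -len : EPROTO;` and `reason_code = len < 0 ? len : -EPROTO;`. The declarations of `len` and `recvlen` are outside the hunk, so it is worth confirming that both are signed and the comparison is not done in unsigned arithmetic.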
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 5fd6f5b8ef03..767289925410 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -336,8 +336,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */
static inline struct smc_clc_msg_proposal_prefix *
smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
{
+ u16 offset = ntohs(pclc->iparea_offset);
+
+ if (offset > sizeof(struct smc_clc_msg_smcd))
+ return NULL;
return (struct smc_clc_msg_proposal_prefix *)
- ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
+ ((u8 *)pclc + sizeof(*pclc) + offset);
}
static inline bool smcr_indicated(int smc_type)
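Review bot: smc_clc_proposal_get_prefix can now return NULL, and its bound `offset > sizeof(struct smc_clc_msg_smcd)` is explained nowhere, so a new caller can neither tell that it must check the result nor see why that size is the cap. The same holds for the `max_offset` expressions added to smc_get_clc_v2_ext and smc_get_clc_smcd_v2_ext in the next two hunks, which are built from `offsetof()` differences on struct smc_clc_msg_proposal_area. I would add a short comment on each helper saying that the offset comes from the peer, that it is limited to the layout of the proposal area, and that NULL is returned otherwise. For this one: `/* iparea_offset is peer-controlled; returns NULL if it points past the optional SMC-D area */`. These checks bound the offset against the struct layout only, so the comparison with the received length still rests on the validation in smc_clc.c.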
@@ -376,8 +380,14 @@ static inline struct smc_clc_v2_extension *
smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
{
struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
+ u16 max_offset;
- if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
+ max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
+ offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) -
+ offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
+
+ if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) ||
+ ntohs(prop_smcd->v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_v2_extension *)
@@ -390,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
static inline struct smc_clc_smcd_v2_extension *
smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
{
+ u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) -
+ offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
+ offsetof(struct smc_clc_v2_extension, hdr) -
+ offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset);
+
if (!prop_v2ext)
return NULL;
- if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
+ if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) ||
+ ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_smcd_v2_extension *)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 500952c2e67b..3b125d348b4a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1818,7 +1818,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk)
{
if (smc_link_downing(&lnk->state)) {
trace_smcr_link_down(lnk, __builtin_return_address(0));
- schedule_work(&lnk->link_down_wrk);
+ smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
+ if (!schedule_work(&lnk->link_down_wrk))
+ smcr_link_put(lnk);
}
}
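Review bot: The reference taken in smcr_link_down_cond_sched is dropped unconditionally at the end of smc_link_down_work in the next hunk, so any other place that schedules `link_down_wrk` has to take the same reference. This diff shows only this one scheduler, and the comment in the worker ("schedulers of link_down_work") suggests there may be others. Please confirm there is no remaining bare `schedule_work(&lnk->link_down_wrk)`, since the put in the worker would then unbalance the refcount. A shared helper that does the hold, the schedule and the put on failure would keep the pairing in one place.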
@@ -1850,11 +1852,14 @@ static void smc_link_down_work(struct work_struct *work)
struct smc_link_group *lgr = link->lgr;
if (list_empty(&lgr->list))
- return;
+ goto out;
wake_up_all(&lgr->llc_msg_waiter);
down_write(&lgr->llc_conf_mutex);
smcr_link_down(link);
up_write(&lgr->llc_conf_mutex);
+
+out:
+ smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
}
static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
diff --git a/net/sunrpc/auth_gss/gss_krb5_test.c b/net/sunrpc/auth_gss/gss_krb5_test.c
index 85625e3f3814..a5bff02cd7ba 100644
--- a/net/sunrpc/auth_gss/gss_krb5_test.c
+++ b/net/sunrpc/auth_gss/gss_krb5_test.c
@@ -17,7 +17,7 @@
#include "gss_krb5_internal.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
struct gss_krb5_test_param {
const char *desc;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 439f75539977..108a4cc2e001 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -807,6 +807,7 @@ static void cleanup_bearer(struct work_struct *work)
{
struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
struct udp_replicast *rcast, *tmp;
+ struct tipc_net *tn;
list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
dst_cache_destroy(&rcast->dst_cache);
@@ -814,10 +815,14 @@ static void cleanup_bearer(struct work_struct *work)
kfree_rcu(rcast, rcu);
}
- atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ tn = tipc_net(sock_net(ub->ubsock->sk));
+
dst_cache_destroy(&ub->rcast.dst_cache);
udp_tunnel_sock_release(ub->ubsock);
+
+ /* Note: could use a call_rcu() to avoid another synchronize_net() */
synchronize_net();
+ atomic_dec(&tn->wq_count);
kfree(ub);
}
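Review bot: The reordering in cleanup_bearer has no comment saying why `tn` is read before `udp_tunnel_sock_release()` and why `atomic_dec(&tn->wq_count)` now comes after `synchronize_net()`. Presumably whoever waits for `wq_count` to drain may tear down the netns, so the decrement has to be the last access that depends on it, but that waiter is not visible here. I would state it at the decrement: `/* last use of the netns: waiters on wq_count may proceed after this */`. The existing note about `call_rcu()` describes a possible optimisation, not the ordering requirement.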
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 001ccc55ef0f..6b1762300443 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2313,6 +2313,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
fds_sent = true;
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
err = skb_splice_from_iter(skb, &msg->msg_iter, size,
sk->sk_allocation);
if (err < 0) {
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 25b28b1434f5..5cf8109f672a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -117,12 +117,14 @@
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+static void vsock_close(struct sock *sk, long timeout);
/* Protocol family. */
struct proto vsock_proto = {
.name = "AF_VSOCK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct vsock_sock),
+ .close = vsock_close,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = vsock_bpf_update_proto,
#endif
@@ -797,39 +799,37 @@ static bool sock_type_connectible(u16 type)
static void __vsock_release(struct sock *sk, int level)
{
- if (sk) {
- struct sock *pending;
- struct vsock_sock *vsk;
-
- vsk = vsock_sk(sk);
- pending = NULL; /* Compiler warning. */
+ struct vsock_sock *vsk;
+ struct sock *pending;
- /* When "level" is SINGLE_DEPTH_NESTING, use the nested
- * version to avoid the warning "possible recursive locking
- * detected". When "level" is 0, lock_sock_nested(sk, level)
- * is the same as lock_sock(sk).
- */
- lock_sock_nested(sk, level);
+ vsk = vsock_sk(sk);
+ pending = NULL; /* Compiler warning. */
- if (vsk->transport)
- vsk->transport->release(vsk);
- else if (sock_type_connectible(sk->sk_type))
- vsock_remove_sock(vsk);
+ /* When "level" is SINGLE_DEPTH_NESTING, use the nested
+ * version to avoid the warning "possible recursive locking
+ * detected". When "level" is 0, lock_sock_nested(sk, level)
+ * is the same as lock_sock(sk).
+ */
+ lock_sock_nested(sk, level);
- sock_orphan(sk);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sock_type_connectible(sk->sk_type))
+ vsock_remove_sock(vsk);
- skb_queue_purge(&sk->sk_receive_queue);
+ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
- /* Clean up any sockets that never were accepted. */
- while ((pending = vsock_dequeue_accept(sk)) != NULL) {
- __vsock_release(pending, SINGLE_DEPTH_NESTING);
- sock_put(pending);
- }
+ skb_queue_purge(&sk->sk_receive_queue);
- release_sock(sk);
- sock_put(sk);
+ /* Clean up any sockets that never were accepted. */
+ while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+ __vsock_release(pending, SINGLE_DEPTH_NESTING);
+ sock_put(pending);
}
+
+ release_sock(sk);
+ sock_put(sk);
}
static void vsock_sk_destruct(struct sock *sk)
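Review bot: __vsock_release no longer tolerates `sk == NULL`, because the check moved to vsock_release in the next hunk and the body now calls `vsock_sk(sk)` and `lock_sock_nested(sk, level)` unconditionally. The recursive call passes a `pending` that the loop condition has already tested, but any callers outside this diff are not visible. Please confirm that no other caller can pass NULL, and record the precondition above the function: `/* sk must be non-NULL; vsock_release() checks before calling */`.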
@@ -901,9 +901,22 @@ void vsock_data_ready(struct sock *sk)
}
EXPORT_SYMBOL_GPL(vsock_data_ready);
+/* Dummy callback required by sockmap.
+ * See unconditional call of saved_close() in sock_map_close().
+ */
+static void vsock_close(struct sock *sk, long timeout)
+{
+}
+
static int vsock_release(struct socket *sock)
{
- __vsock_release(sock->sk, 0);
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ sk->sk_prot->close(sk, 0);
+ __vsock_release(sk, 0);
sock->sk = NULL;
sock->state = SS_FREE;
@@ -1054,6 +1067,9 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
mask |= EPOLLRDHUP;
}
+ if (sk_is_readable(sk))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
if (sock->type == SOCK_DGRAM) {
/* For datagram sockets we can read if there is something in
* the queue and write as long as the socket isn't shutdown for
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 9d2edb71f981..dd84fc54fb9b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -814,7 +814,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MLO_LINKS] =
NLA_POLICY_NESTED_ARRAY(nl80211_policy),
[NL80211_ATTR_MLO_LINK_ID] =
- NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS),
+ NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS - 1),
[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 431da30817a6..268171600087 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -83,6 +83,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
if (!request)
return -ENOMEM;
+ request->n_channels = n_channels;
if (wdev->conn->params.channel) {
enum nl80211_band band = wdev->conn->params.channel->band;
struct ieee80211_supported_band *sband =
diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c
index 74bbee25085f..7b97b731993c 100644
--- a/net/wireless/tests/chan.c
+++ b/net/wireless/tests/chan.c
@@ -7,7 +7,7 @@
#include <net/cfg80211.h>
#include <kunit/test.h>
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static struct ieee80211_channel chan_6ghz_1 = {
.band = NL80211_BAND_6GHZ,
diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
index 9f458be71659..e12f620b5f42 100644
--- a/net/wireless/tests/scan.c
+++ b/net/wireless/tests/scan.c
@@ -14,7 +14,7 @@
/* mac80211 helpers for element building */
#include "../../mac80211/ieee80211_i.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
struct test_elem {
u8 id;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 040d62051eb9..65c8e47246b7 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2843,10 +2843,9 @@ void cfg80211_remove_link(struct wireless_dev *wdev, unsigned int link_id)
break;
}
- wdev->valid_links &= ~BIT(link_id);
-
rdev_del_intf_link(rdev, wdev, link_id);
+ wdev->valid_links &= ~BIT(link_id);
eth_zero_addr(wdev->links[link_id].addr);
}
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ae71da7d2cd6..1f7975b49657 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -387,10 +387,9 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
return;
}
- if (!refcount_dec_and_test(&dma_map->users))
- return;
+ if (refcount_dec_and_test(&dma_map->users))
+ __xp_dma_unmap(dma_map, attrs);
- __xp_dma_unmap(dma_map, attrs);
kvfree(pool->dma_pages);
pool->dma_pages = NULL;
pool->dma_pages_cnt = 0;
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index e1c526f97ce3..afa457506274 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -224,7 +224,7 @@ static long xsk_map_delete_elem(struct bpf_map *map, void *key)
struct xsk_map *m = container_of(map, struct xsk_map, map);
struct xdp_sock __rcu **map_entry;
struct xdp_sock *old_xs;
- int k = *(u32 *)key;
+ u32 k = *(u32 *)key;
if (k >= map->max_entries)
return -EINVAL;