author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2024-12-16 16:23:12 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2024-12-16 16:23:12 +0100
commit     362a7993ed0173e70859d0a624d8ffa968b2cea0 (patch)
tree       5b031ff9d37e65f0daabdc9d8134ecd882ff70c6 /include
parent     usb: storage: add a macro for the upper limit of max LUN (diff)
parent     Linux 6.13-rc3 (diff)
Merge 6.13-rc3 into usb-next
We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include')
-rw-r--r--  include/drm/display/drm_dp_mst_helper.h      7
-rw-r--r--  include/linux/bio.h                           2
-rw-r--r--  include/linux/blkdev.h                        5
-rw-r--r--  include/linux/bpf.h                          20
-rw-r--r--  include/linux/bpf_verifier.h                  1
-rw-r--r--  include/linux/cleanup.h                      14
-rw-r--r--  include/linux/clocksource.h                   2
-rw-r--r--  include/linux/dsa/ocelot.h                    1
-rw-r--r--  include/linux/export.h                        2
-rw-r--r--  include/linux/filter.h                        2
-rw-r--r--  include/linux/hid_bpf.h                       2
-rw-r--r--  include/linux/io_uring/cmd.h                  4
-rw-r--r--  include/linux/mfd/da9063/core.h               1
-rw-r--r--  include/linux/netdev_features.h               7
-rw-r--r--  include/linux/page-flags.h                    4
-rw-r--r--  include/linux/pgalloc_tag.h                   4
-rw-r--r--  include/linux/scatterlist.h                   2
-rw-r--r--  include/linux/sched.h                         7
-rw-r--r--  include/linux/stackdepot.h                    6
-rw-r--r--  include/linux/virtio.h                        6
-rw-r--r--  include/linux/ww_mutex.h                      4
-rw-r--r--  include/net/bluetooth/bluetooth.h            10
-rw-r--r--  include/net/bluetooth/hci_core.h            108
-rw-r--r--  include/net/inet_timewait_sock.h              2
-rw-r--r--  include/net/lapb.h                            2
-rw-r--r--  include/net/mac80211.h                        7
-rw-r--r--  include/net/net_namespace.h                   6
-rw-r--r--  include/net/netfilter/nf_tables.h             4
-rw-r--r--  include/net/netfilter/nf_tables_core.h        1
-rw-r--r--  include/soc/arc/arc_aux.h (renamed from include/soc/arc/aux.h)  0
-rw-r--r--  include/soc/arc/mcip.h                        2
-rw-r--r--  include/soc/arc/timers.h                      2
-rw-r--r--  include/soc/mscc/ocelot.h                     2
-rw-r--r--  include/sound/cs35l56.h                       6
-rw-r--r--  include/trace/events/damon.h                  2
-rw-r--r--  include/uapi/linux/iommufd.h                 31
-rw-r--r--  include/ufs/ufshcd.h                         10
37 files changed, 169 insertions, 129 deletions
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index f6a1cbb0f600..a80ba457a858 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -700,6 +700,13 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+ * @reset_rx_state: The down request's reply and up request message
+ * receiver state must be reset, after the topology manager got
+ * removed. Protected by @lock.
+ */
+ bool reset_rx_state : 1;
+
+ /**
* @payload_count: The number of currently active payloads in hardware. This value is only
* intended to be used internally by MST helpers for payload tracking, and is only safe to
* read/write from the atomic commit (not check) context.
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 60830a6a5939..7a1b3b1a8fed 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -423,7 +423,7 @@ void __bio_add_page(struct bio *bio, struct page *page,
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
+void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 08a727b40816..378d3a1a22fc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -200,8 +200,6 @@ struct gendisk {
spinlock_t zone_wplugs_lock;
struct mempool_s *zone_wplugs_pool;
struct hlist_head *zone_wplugs_hash;
- struct list_head zone_wplugs_err_list;
- struct work_struct zone_wplugs_work;
struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -1421,6 +1419,9 @@ static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
return is_seq;
}
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+
static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
return q->limits.dma_alignment;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index eaee2a819f4c..6e63dd3443b9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1527,6 +1527,7 @@ struct bpf_prog_aux {
bool is_extended; /* true if extended by freplace program */
bool jits_use_priv_stack;
bool priv_stack_requested;
+ bool changes_pkt_data;
u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
struct bpf_arena *arena;
@@ -2193,26 +2194,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
* rcu-protected dynamically sized maps.
*/
static __always_inline u32
-bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;
might_fault();
+ RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
- rcu_read_lock_trace();
migrate_disable();
run_ctx.is_uprobe = true;
- array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
- if (unlikely(!array))
- goto out;
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
@@ -2227,9 +2227,7 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
rcu_read_unlock();
}
bpf_reset_run_ctx(old_run_ctx);
-out:
migrate_enable();
- rcu_read_unlock_trace();
return ret;
}
@@ -3516,10 +3514,4 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
return prog->aux->func_idx != 0;
}
-static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
-{
- return prog->type == BPF_PROG_TYPE_TRACING &&
- prog->expected_attach_type == BPF_TRACE_RAW_TP;
-}
-
#endif /* _LINUX_BPF_H */
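
Editor's note (illustration, not part of the commit): bpf_prog_run_array_uprobe() no longer accepts the __rcu-annotated pointer and no longer takes the trace-RCU read lock itself, so both now fall to the caller. A minimal sketch, with 'uprobe_progs' as a hypothetical __rcu pointer to a bpf_prog_array owned by the caller:

	rcu_read_lock_trace();
	ret = bpf_prog_run_array_uprobe(
		rcu_dereference_check(uprobe_progs, rcu_read_lock_trace_held()),
		ctx, run_prog);
	rcu_read_unlock_trace();
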
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f4290c179bee..48b7b2eeb7e2 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -659,6 +659,7 @@ struct bpf_subprog_info {
bool args_cached: 1;
/* true if bpf_fastcall stack region is used by functions that can't be inlined */
bool keep_fastcall_stack: 1;
+ bool changes_pkt_data: 1;
enum priv_stack_mode priv_stack_mode;
u8 arg_cnt;
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 966fcc5ff8ef..ec00e3f7af2b 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -273,12 +273,6 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* an anonymous instance of the (guard) class, not recommended for
* conditional locks.
*
- * if_not_guard(name, args...) { <error handling> }:
- * convenience macro for conditional guards that calls the statement that
- * follows only if the lock was not acquired (typically an error return).
- *
- * Only for conditional locks.
- *
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
* explicit name 'scope') is declard in a for-loop such that its scope is
@@ -350,14 +344,6 @@ _label: \
#define scoped_cond_guard(_name, _fail, args...) \
__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
-#define __if_not_guard(_name, _id, args...) \
- BUILD_BUG_ON(!__is_cond_ptr(_name)); \
- CLASS(_name, _id)(args); \
- if (!__guard_ptr(_name)(&_id))
-
-#define if_not_guard(_name, args...) \
- __if_not_guard(_name, __UNIQUE_ID(guard), args)
-
/*
* Additional helper macros for generating lock guards with types, either for
* locks that don't have a native type (eg. RCU, preempt) or those that need a
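
Editor's note (illustration, not part of the commit): with if_not_guard() removed, a conditional lock with error handling can be written with scoped_cond_guard(), whose second argument is the statement run when the lock is not acquired. A sketch assuming a mutex named 'my_mutex':

	scoped_cond_guard(mutex_try, return -EBUSY, &my_mutex) {
		/* lock held inside this scope, released on exit */
	}
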
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index ef1b16da6ad5..65b7c41471c3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct module;
* @archdata: Optional arch-specific data
* @max_cycles: Maximum safe cycle value which won't overflow on
* multiplication
+ * @max_raw_delta: Maximum safe delta value for negative motion detection
* @name: Pointer to clocksource name
* @list: List head for registration (internal)
* @freq_khz: Clocksource frequency in khz.
@@ -109,6 +110,7 @@ struct clocksource {
struct arch_clocksource_data archdata;
#endif
u64 max_cycles;
+ u64 max_raw_delta;
const char *name;
struct list_head list;
u32 freq_khz;
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index 6fbfbde68a37..620a3260fc08 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -15,6 +15,7 @@
struct ocelot_skb_cb {
struct sk_buff *clone;
unsigned int ptp_class; /* valid only for clones */
+ unsigned long ptp_tx_time; /* valid only for clones */
u32 tstamp_lo;
u8 ptp_cmd;
u8 ts_id;
diff --git a/include/linux/export.h b/include/linux/export.h
index f5f3950a1e42..2633df4d31e6 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -60,7 +60,7 @@
#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE)
#else
#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3a21947f2fd4..0477254bc2d3 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1122,7 +1122,7 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
bool bpf_jit_supports_private_stack(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
-bool bpf_helper_changes_pkt_data(void *func);
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index a6876ab29004..a2e47dbcf82c 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -78,7 +78,7 @@ struct hid_ops {
const struct bus_type *bus_type;
};
-extern struct hid_ops *hid_ops;
+extern const struct hid_ops *hid_ops;
/**
* struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 578a3fdf5c71..0d5448c0b86c 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -43,7 +43,7 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
* Note: the caller should never hard code @issue_flags and is only allowed
* to pass the mask provided by the core io_uring code.
*/
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
unsigned issue_flags);
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
@@ -67,7 +67,7 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
- ssize_t ret2, unsigned issue_flags)
+ u64 ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 8db52324f416..eae82f421414 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -78,6 +78,7 @@ struct da9063 {
enum da9063_type type;
unsigned char variant_code;
unsigned int flags;
+ bool use_sw_pm;
/* Control interface */
struct regmap *regmap;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 66e7d26b70a4..11be70a7929f 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -253,4 +253,11 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
+static inline netdev_features_t netdev_base_features(netdev_features_t features)
+{
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+ return features;
+}
+
#endif /* _LINUX_NETDEV_FEATURES_H */
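
Editor's note (illustration, not part of the commit): netdev_base_features() gives upper-device drivers (bonding, team, bridge and the like) a common starting point before merging per-port feature sets. A hedged sketch with a hypothetical device name:

	/* strip the one-for-all bits, force the all-for-all bits */
	netdev_features_t base = netdev_base_features(upper_dev->features);
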
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 2220bfec278e..cf46ac720802 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
{
const struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
return &page[n].flags;
}
@@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
return &page[n].flags;
}
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 0e43ab653ab6..3469c4b20105 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -231,7 +231,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
}
void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
-void pgalloc_tag_copy(struct folio *new, struct folio *old);
+void pgalloc_tag_swap(struct folio *new, struct folio *old);
void __init alloc_tag_sec_init(void);
@@ -245,7 +245,7 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
static inline void alloc_tag_sec_init(void) {}
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}
+static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index c5e2239b550e..d836e7440ee8 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
}
/**
- * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
* @sg: SG entry
*
* Description:
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d380bffee2ef..66b311fbd5d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -656,6 +656,12 @@ struct sched_dl_entity {
* @dl_defer_armed tells if the deferrable server is waiting
* for the replenishment timer to activate it.
*
+ * @dl_server_active tells if the dlserver is active(started).
+ * dlserver is started on first cfs enqueue on an idle runqueue
+ * and is stopped when a dequeue results in 0 cfs tasks on the
+ * runqueue. In other words, dlserver is active only when cpu's
+ * runqueue has atleast one cfs task.
+ *
* @dl_defer_running tells if the deferrable server is actually
* running, skipping the defer phase.
*/
@@ -664,6 +670,7 @@ struct sched_dl_entity {
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
+ unsigned int dl_server_active : 1;
unsigned int dl_defer : 1;
unsigned int dl_defer_armed : 1;
unsigned int dl_defer_running : 1;
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e9ec32fb97d4..2cc21ffcdaf9 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; }
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; }
*/
depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
unsigned int nr_entries,
- gfp_t gfp_flags,
+ gfp_t alloc_flags,
depot_flags_t depot_flags);
/**
@@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
- unsigned int nr_entries, gfp_t gfp_flags);
+ unsigned int nr_entries, gfp_t alloc_flags);
/**
* __stack_depot_get_stack_record - Get a pointer to a stack_record struct
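
Editor's note (illustration, not part of the commit), matching the corrected wording above: from a context where alloc_pages() must not be called, leave STACK_DEPOT_FLAG_CAN_ALLOC unset; the gfp argument is only consulted when allocation is allowed.

	/* NMI or raw_spin_lock context: record without allocating new pools */
	handle = stack_depot_save_flags(entries, nr_entries, 0, 0);
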
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 57cc4b07fd17..dd88682e27e3 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -109,9 +109,11 @@ dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
- void (*recycle)(struct virtqueue *vq, void *buf));
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq));
int virtqueue_reset(struct virtqueue *vq,
- void (*recycle)(struct virtqueue *vq, void *buf));
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq));
struct virtio_admin_cmd {
__le16 opcode;
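
Editor's note (illustration, not part of the commit): virtqueue_resize() and virtqueue_reset() now take a second callback that runs once every old buffer has been recycled, so a driver knows when it is safe to re-provision the ring. Callback names below are hypothetical:

	static void my_recycle(struct virtqueue *vq, void *buf)
	{
		kfree(buf);		/* drop the stale buffer */
	}

	static void my_recycle_done(struct virtqueue *vq)
	{
		/* all old buffers returned; refill the ring here */
	}

	static int my_resize(struct virtqueue *vq, u32 new_num)
	{
		return virtqueue_resize(vq, new_num, my_recycle, my_recycle_done);
	}
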
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index a401a2f31a77..45ff6f7a872b 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -156,8 +156,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
- lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
- &ww_class->mutex_key, 0);
+ lockdep_init_map_wait(&ctx->first_lock_dep_map, ww_class->mutex_name,
+ &ww_class->mutex_key, 0, LD_WAIT_SLEEP);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index f66bc85c6411..435250c72d56 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -123,6 +123,7 @@ struct bt_voice {
#define BT_VOICE_TRANSPARENT 0x0003
#define BT_VOICE_CVSD_16BIT 0x0060
+#define BT_VOICE_TRANSPARENT_16BIT 0x0063
#define BT_SNDMTU 12
#define BT_RCVMTU 13
@@ -590,15 +591,6 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
-static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
- sockptr_t src, size_t src_size)
-{
- if (dst_size > src_size)
- return -EINVAL;
-
- return copy_from_sockptr(dst, src, dst_size);
-}
-
int bt_to_errno(u16 code);
__u8 bt_status(int err);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ea798f07c5a2..ca22ead85dbe 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -804,7 +804,6 @@ struct hci_conn_params {
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
-extern struct mutex hci_cb_list_lock;
#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
@@ -2017,24 +2016,47 @@ struct hci_cb {
char *name;
+ bool (*match) (struct hci_conn *conn);
void (*connect_cfm) (struct hci_conn *conn, __u8 status);
void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
void (*security_cfm) (struct hci_conn *conn, __u8 status,
- __u8 encrypt);
+ __u8 encrypt);
void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
+static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
+{
+ struct hci_cb *cb, *cpy;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cb, &hci_cb_list, list) {
+ if (cb->match && cb->match(conn)) {
+ cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
+ if (!cpy)
+ break;
+
+ *cpy = *cb;
+ INIT_LIST_HEAD(&cpy->list);
+ list_add_rcu(&cpy->list, list);
+ }
+ }
+ rcu_read_unlock();
+}
+
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
- struct hci_cb *cb;
+ struct list_head list;
+ struct hci_cb *cb, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ hci_cb_lookup(conn, &list);
- mutex_lock(&hci_cb_list_lock);
- list_for_each_entry(cb, &hci_cb_list, list) {
+ list_for_each_entry_safe(cb, tmp, &list, list) {
if (cb->connect_cfm)
cb->connect_cfm(conn, status);
+ kfree(cb);
}
- mutex_unlock(&hci_cb_list_lock);
if (conn->connect_cfm_cb)
conn->connect_cfm_cb(conn, status);
@@ -2042,43 +2064,55 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- struct hci_cb *cb;
+ struct list_head list;
+ struct hci_cb *cb, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ hci_cb_lookup(conn, &list);
- mutex_lock(&hci_cb_list_lock);
- list_for_each_entry(cb, &hci_cb_list, list) {
+ list_for_each_entry_safe(cb, tmp, &list, list) {
if (cb->disconn_cfm)
cb->disconn_cfm(conn, reason);
+ kfree(cb);
}
- mutex_unlock(&hci_cb_list_lock);
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
}
-static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
+ __u8 encrypt)
{
- struct hci_cb *cb;
- __u8 encrypt;
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
- return;
+ struct list_head list;
+ struct hci_cb *cb, *tmp;
- encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
+ INIT_LIST_HEAD(&list);
+ hci_cb_lookup(conn, &list);
- mutex_lock(&hci_cb_list_lock);
- list_for_each_entry(cb, &hci_cb_list, list) {
+ list_for_each_entry_safe(cb, tmp, &list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
+ kfree(cb);
}
- mutex_unlock(&hci_cb_list_lock);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
+static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
+{
+ __u8 encrypt;
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+ return;
+
+ encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
+
+ hci_security_cfm(conn, status, encrypt);
+}
+
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
- struct hci_cb *cb;
__u8 encrypt;
if (conn->state == BT_CONFIG) {
@@ -2105,40 +2139,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
conn->sec_level = conn->pending_sec_level;
}
- mutex_lock(&hci_cb_list_lock);
- list_for_each_entry(cb, &hci_cb_list, list) {
- if (cb->security_cfm)
- cb->security_cfm(conn, status, encrypt);
- }
- mutex_unlock(&hci_cb_list_lock);
-
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ hci_security_cfm(conn, status, encrypt);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
- struct hci_cb *cb;
+ struct list_head list;
+ struct hci_cb *cb, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ hci_cb_lookup(conn, &list);
- mutex_lock(&hci_cb_list_lock);
- list_for_each_entry(cb, &hci_cb_list, list) {
+ list_for_each_entry_safe(cb, tmp, &list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
+ kfree(cb);
}
- mutex_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
__u8 role)
{
- struct hci_cb *cb;
+ struct list_head list;
+ struct hci_cb *cb, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ hci_cb_lookup(conn, &list);
- mutex_lock(&hci_cb_list_lock);
- list_for_each_entry(cb, &hci_cb_list, list) {
+ list_for_each_entry_safe(cb, tmp, &list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
+ kfree(cb);
}
- mutex_unlock(&hci_cb_list_lock);
}
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
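
Editor's note (illustration, not part of the commit): a protocol layer opts in to these confirmation callbacks through the new ->match hook; only matching entries are copied under RCU and invoked, which is what removes the need for hci_cb_list_lock. A hedged sketch with hypothetical handler names:

	static bool iso_match(struct hci_conn *conn)
	{
		return conn->type == ISO_LINK;
	}

	static struct hci_cb iso_cb = {
		.name		= "ISO",
		.match		= iso_match,
		.connect_cfm	= iso_connect_cfm,
		.disconn_cfm	= iso_disconn_cfm,
	};
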
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index beb533a0e880..62c0a7e65d6b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -45,6 +45,8 @@ struct inet_timewait_sock {
#define tw_node __tw_common.skc_nulls_node
#define tw_bind_node __tw_common.skc_bind_node
#define tw_refcnt __tw_common.skc_refcnt
+#define tw_tx_queue_mapping __tw_common.skc_tx_queue_mapping
+#define tw_rx_queue_mapping __tw_common.skc_rx_queue_mapping
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
diff --git a/include/net/lapb.h b/include/net/lapb.h
index 124ee122f2c8..6c07420644e4 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -4,7 +4,7 @@
#include <linux/lapb.h>
#include <linux/refcount.h>
-#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
+#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */
#define LAPB_ACK_PENDING_CONDITION 0x01
#define LAPB_REJECT_CONDITION 0x02
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a97c9f85ae9a..ab8dce1f2c27 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1987,6 +1987,8 @@ enum ieee80211_neg_ttlm_res {
* @neg_ttlm: negotiated TID to link mapping info.
* see &struct ieee80211_neg_ttlm.
* @addr: address of this interface
+ * @addr_valid: indicates if the address is actively used. Set to false for
+ * passive monitor interfaces, true in all other cases.
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
* @netdev_features: tx netdev features supported by the hardware for this
@@ -2026,6 +2028,7 @@ struct ieee80211_vif {
u16 valid_links, active_links, dormant_links, suspended_links;
struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
+ bool addr_valid;
bool p2p;
u8 cab_queue;
@@ -6795,14 +6798,12 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
/**
* ieee80211_channel_switch_disconnect - disconnect due to channel switch error
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
- * @block_tx: if %true, do not send deauth frame.
*
* Instruct mac80211 to disconnect due to a channel switch error. The channel
* switch can request to block the tx and so, we need to make sure we do not send
* a deauth frame in this case.
*/
-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
- bool block_tx);
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif);
/**
* ieee80211_request_smps - request SM PS transition
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 873c0f9fdac6..5a2a0df8ad91 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -80,6 +80,7 @@ struct net {
* or to unregister pernet ops
* (pernet_ops_rwsem write locked).
*/
+ struct llist_node defer_free_list;
struct llist_node cleanup_list; /* namespaces on death row */
#ifdef CONFIG_KEYS
@@ -325,6 +326,11 @@ static inline int check_net(const struct net *net)
#define net_drop_ns NULL
#endif
+/* Returns true if the netns initialization is completed successfully */
+static inline bool net_initialized(const struct net *net)
+{
+ return READ_ONCE(net->list.next);
+}
static inline void __netns_tracker_alloc(struct net *net,
netns_tracker *tracker,
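
Editor's note (illustration, not part of the commit): the new predicate lets code that can race with namespace setup bail out early instead of touching half-initialized state:

	if (!net_initialized(net))
		return;
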
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 80a537ac26cd..4afa64c81304 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1103,7 +1103,6 @@ struct nft_rule_blob {
* @name: name of the chain
* @udlen: user data length
* @udata: user data in the chain
- * @rcu_head: rcu head for deferred release
* @blob_next: rule blob pointer to the next in the chain
*/
struct nft_chain {
@@ -1121,7 +1120,6 @@ struct nft_chain {
char *name;
u16 udlen;
u8 *udata;
- struct rcu_head rcu_head;
/* Only used during control plane commit phase: */
struct nft_rule_blob *blob_next;
@@ -1265,7 +1263,6 @@ static inline void nft_use_inc_restore(u32 *use)
* @sets: sets in the table
* @objects: stateful objects in the table
* @flowtables: flow tables in the table
- * @net: netnamespace this table belongs to
* @hgenerator: handle generator state
* @handle: table handle
* @use: number of chain references to this table
@@ -1285,7 +1282,6 @@ struct nft_table {
struct list_head sets;
struct list_head objects;
struct list_head flowtables;
- possible_net_t net;
u64 hgenerator;
u64 handle;
u32 use;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index ff27cb2e1662..03b6165756fc 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -161,6 +161,7 @@ enum {
};
struct nft_inner_tun_ctx {
+ unsigned long cookie;
u16 type;
u16 inner_tunoff;
u16 inner_lloff;
diff --git a/include/soc/arc/aux.h b/include/soc/arc/arc_aux.h
index 9c2eff6140b6..9c2eff6140b6 100644
--- a/include/soc/arc/aux.h
+++ b/include/soc/arc/arc_aux.h
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index d1a93c73f006..a78dacd149f1 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -8,7 +8,7 @@
#ifndef __SOC_ARC_MCIP_H
#define __SOC_ARC_MCIP_H
-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>
#define ARC_REG_MCIP_BCR 0x0d0
#define ARC_REG_MCIP_IDU_BCR 0x0D5
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
index ae99d3e855f1..51a74166296c 100644
--- a/include/soc/arc/timers.h
+++ b/include/soc/arc/timers.h
@@ -6,7 +6,7 @@
#ifndef __SOC_ARC_TIMERS_H
#define __SOC_ARC_TIMERS_H
-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 462c653e1017..2db9ae0575b6 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -778,7 +778,6 @@ struct ocelot_port {
phy_interface_t phy_mode;
- unsigned int ptp_skbs_in_flight;
struct sk_buff_head tx_skbs;
unsigned int trap_proto;
@@ -786,7 +785,6 @@ struct ocelot_port {
u16 mrp_ring_id;
u8 ptp_cmd;
- u8 ts_id;
u8 index;
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 94e8185c4795..3dc7a1551ac3 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -271,12 +271,6 @@ struct cs35l56_base {
struct gpio_desc *reset_gpio;
};
-/* Temporary to avoid a build break with the HDA driver */
-static inline int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
-{
- return 0;
-}
-
static inline bool cs35l56_is_otp_register(unsigned int reg)
{
return (reg >> 16) == 3;
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 23200aabccac..da4bd9fd1162 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply,
unsigned int target_idx, struct damon_region *r,
unsigned int nr_regions, bool do_trace),
- TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
+ TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
TP_CONDITION(do_trace),
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 059b6537f2b7..34810f6ae2b5 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -297,7 +297,7 @@ struct iommu_ioas_unmap {
* ioctl(IOMMU_OPTION_HUGE_PAGES)
* @IOMMU_OPTION_RLIMIT_MODE:
* Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
- * to invoke this. Value 0 (default) is user based accouting, 1 uses process
+ * to invoke this. Value 0 (default) is user based accounting, 1 uses process
* based accounting. Global option, object_id must be 0
* @IOMMU_OPTION_HUGE_PAGES:
* Value 1 (default) allows contiguous pages to be combined when generating
@@ -390,7 +390,7 @@ struct iommu_vfio_ioas {
* @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
* domain can be attached to any PASID on the device.
* Any domain attached to the non-PASID part of the
- * device must also be flaged, otherwise attaching a
+ * device must also be flagged, otherwise attaching a
* PASID will blocked.
* If IOMMU does not support PASID it will return
* error (-EOPNOTSUPP).
@@ -558,16 +558,25 @@ struct iommu_hw_info_vtd {
* For the details of @idr, @iidr and @aidr, please refer to the chapters
* from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
*
- * User space should read the underlying ARM SMMUv3 hardware information for
- * the list of supported features.
+ * This reports the raw HW capability, and not all bits are meaningful to be
+ * read by userspace. Only the following fields should be used:
*
- * Note that these values reflect the raw HW capability, without any insight if
- * any required kernel driver support is present. Bits may be set indicating the
- * HW has functionality that is lacking kernel software support, such as BTM. If
- * a VMM is using this information to construct emulated copies of these
- * registers it should only forward bits that it knows it can support.
+ * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN , CD2L, ASID16, TTF
+ * idr[1]: SIDSIZE, SSIDSIZE
+ * idr[3]: BBML, RIL
+ * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
*
- * In future, presence of required kernel support will be indicated in flags.
+ * - S1P should be assumed to be true if a NESTED HWPT can be created
+ * - VFIO/iommufd only support platforms with COHACC, it should be assumed to be
+ * true.
+ * - ATS is a per-device property. If the VMM describes any devices as ATS
+ * capable in ACPI/DT it should set the corresponding idr.
+ *
+ * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
+ * important that VMMs do not read bits outside the list to allow for
+ * compatibility with future kernels. Several features in the SMMUv3
+ * architecture are not currently supported by the kernel for nesting: HTTU,
+ * BTM, MPAM and others.
*/
struct iommu_hw_info_arm_smmuv3 {
__u32 flags;
@@ -766,7 +775,7 @@ struct iommu_hwpt_vtd_s1_invalidate {
};
/**
- * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cahce invalidation
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
* (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
* @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
* Must be little-endian.
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index d7aca9e61684..d650ae6b58d3 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -310,7 +310,9 @@ struct ufs_pwr_mode_info {
* to allow variant specific Uni-Pro initialization.
* @pwr_change_notify: called before and after a power mode change
* is carried out to allow vendor spesific capabilities
- * to be set.
+ * to be set. PRE_CHANGE can modify final_params based
+ * on desired_pwr_mode, but POST_CHANGE must not alter
+ * the final_params parameter
* @setup_xfer_req: called before any transfer request is issued
* to set some things
* @setup_task_mgmt: called before any task management request is issued
@@ -353,9 +355,9 @@ struct ufs_hba_variant_ops {
int (*link_startup_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
int (*pwr_change_notify)(struct ufs_hba *,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *,
- struct ufs_pa_layer_attr *);
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *desired_pwr_mode,
+ struct ufs_pa_layer_attr *final_params);
void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
bool is_scsi_cmd);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
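
Editor's note (illustration, not part of the commit): per the clarified kernel-doc above, a vendor pwr_change_notify() implementation may adjust final_params only in the PRE_CHANGE phase. The function name, field tweaks and gear constant below are assumptions for the sketch:

	static int my_pwr_change_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status,
					struct ufs_pa_layer_attr *desired_pwr_mode,
					struct ufs_pa_layer_attr *final_params)
	{
		if (status == PRE_CHANGE) {
			*final_params = *desired_pwr_mode;
			final_params->gear_rx = min_t(u32, final_params->gear_rx, UFS_HS_G4);
			final_params->gear_tx = min_t(u32, final_params->gear_tx, UFS_HS_G4);
		}
		/* POST_CHANGE: final_params must not be modified */
		return 0;
	}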