Diffstat (limited to 'kernel')
150 files changed, 9410 insertions, 2484 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 416017301660..5876e30c5740 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -99,3 +99,23 @@ config PREEMPT_DYNAMIC Interesting if you want the same pre-built kernel should be used for both Server and Desktop workloads. + +config SCHED_CORE + bool "Core Scheduling for SMT" + depends on SCHED_SMT + help + This option permits Core Scheduling, a means of coordinated task + selection across SMT siblings. When enabled -- see + prctl(PR_SCHED_CORE) -- task selection ensures that all SMT siblings + will execute a task from the same 'core group', forcing idle when no + matching task is found. + + Use of this feature includes: + - mitigation of some (not all) SMT side channels; + - limiting SMT interference to improve determinism and/or performance. + + SCHED_CORE is default disabled. When it is enabled and unused, + which is the likely usage by Linux distributions, there should + be no measurable impact on performance. + + diff --git a/kernel/audit.h b/kernel/audit.h index 1522e100fd17..b565ea16c0a5 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* audit -- definition of audit_context structure and supporting types +/* audit -- definition of audit_context structure and supporting types * * Copyright 2003-2004 Red Hat, Inc. * Copyright 2005 Hewlett-Packard Development Company, L.P. @@ -21,16 +21,16 @@ a per-task filter. At syscall entry, the audit_state is augmented by the syscall filter. */ enum audit_state { - AUDIT_DISABLED, /* Do not create per-task audit_context. + AUDIT_STATE_DISABLED, /* Do not create per-task audit_context. * No syscall-specific audit records can * be generated. */ - AUDIT_BUILD_CONTEXT, /* Create the per-task audit_context, + AUDIT_STATE_BUILD, /* Create the per-task audit_context, * and fill it in at syscall * entry time. This makes a full * syscall record available if some * other part of the kernel decides it * should be recorded. */ - AUDIT_RECORD_CONTEXT /* Create the per-task audit_context, + AUDIT_STATE_RECORD /* Create the per-task audit_context, * always fill it in at syscall entry * time, and always write out the audit * record at syscall exit time. 
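A minimal user-space sketch of the interface the SCHED_CORE help text above points at, assuming the PR_SCHED_CORE prctl constants from <linux/prctl.h> (guarded below in case the installed header predates them); the program is illustrative only and not part of this series:

/* Create a core-scheduling cookie for the calling thread group so its
 * threads only share an SMT core with each other.  Requires a kernel
 * built with CONFIG_SCHED_CORE; fails (e.g. EINVAL/ENODEV) otherwise.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE		62	/* value per <linux/prctl.h> */
#define PR_SCHED_CORE_CREATE	1	/* create a new unique cookie */
#endif

int main(void)
{
	/* arg3 = 0 selects the calling task, arg4 = 1 applies the cookie
	 * to the whole thread group (TGID scope), arg5 must be 0.
	 */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 1, 0))
		perror("PR_SCHED_CORE_CREATE");
	else
		puts("core scheduling cookie created for this thread group");
	return 0;
}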
*/ @@ -322,7 +322,7 @@ static inline int audit_signal_info_syscall(struct task_struct *t) return 0; } -#define audit_filter_inodes(t, c) AUDIT_DISABLED +#define audit_filter_inodes(t, c) AUDIT_STATE_DISABLED #endif /* CONFIG_AUDITSYSCALL */ extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len); diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 6c91902f4f45..b2be4e978ba3 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -689,8 +689,7 @@ void audit_trim_trees(void) tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); - list_del(&cursor); - list_add(&cursor, &tree->list); + list_move(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); @@ -899,8 +898,7 @@ int audit_tag_tree(char *old, char *new) tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); - list_del(&cursor); - list_add(&cursor, &tree->list); + list_move(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path2); @@ -925,8 +923,7 @@ int audit_tag_tree(char *old, char *new) mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); if (!tree->goner) { - list_del(&tree->list); - list_add(&tree->list, &tree_list); + list_move(&tree->list, &tree_list); } spin_unlock(&hash_lock); put_tree(tree); @@ -937,8 +934,7 @@ int audit_tag_tree(char *old, char *new) tree = container_of(barrier.prev, struct audit_tree, list); get_tree(tree); - list_del(&tree->list); - list_add(&tree->list, &barrier); + list_move(&tree->list, &barrier); mutex_unlock(&audit_filter_mutex); if (!failed) { diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 175ef6f3ea4e..8dd73a64f921 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -160,6 +160,7 @@ static const struct audit_nfcfgop_tab audit_nfcfgs[] = { static int audit_match_perm(struct audit_context *ctx, int mask) { unsigned n; + if (unlikely(!ctx)) return 0; n = ctx->major; @@ -231,7 +232,7 @@ static void audit_set_auditable(struct audit_context *ctx) { if (!ctx->prio) { ctx->prio = 1; - ctx->current_state = AUDIT_RECORD_CONTEXT; + ctx->current_state = AUDIT_STATE_RECORD; } } @@ -239,6 +240,7 @@ static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk) { struct audit_tree_refs *p = ctx->trees; int left = ctx->tree_count; + if (likely(left)) { p->c[--left] = chunk; ctx->tree_count = left; @@ -259,6 +261,7 @@ static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk) static int grow_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p = ctx->trees; + ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL); if (!ctx->trees) { ctx->trees = p; @@ -277,6 +280,7 @@ static void unroll_tree_refs(struct audit_context *ctx, { struct audit_tree_refs *q; int n; + if (!p) { /* we started with empty chain */ p = ctx->first_trees; @@ -303,6 +307,7 @@ static void unroll_tree_refs(struct audit_context *ctx, static void free_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p, *q; + for (p = ctx->first_trees; p; p = q) { q = p->next; kfree(p); @@ -313,6 +318,7 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree) { struct audit_tree_refs *p; int n; + if (!tree) return 0; /* full ones */ @@ -337,13 +343,13 @@ static int audit_compare_uid(kuid_t uid, { struct audit_names *n; int rc; - + if (name) { rc = audit_uid_comparator(uid, f->op, name->uid); if (rc) return rc; } - + if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = 
audit_uid_comparator(uid, f->op, n->uid); @@ -361,13 +367,13 @@ static int audit_compare_gid(kgid_t gid, { struct audit_names *n; int rc; - + if (name) { rc = audit_gid_comparator(gid, f->op, name->gid); if (rc) return rc; } - + if (ctx) { list_for_each_entry(n, &ctx->names_list, list) { rc = audit_gid_comparator(gid, f->op, n->gid); @@ -751,10 +757,10 @@ static int audit_filter_rules(struct task_struct *tsk, } switch (rule->action) { case AUDIT_NEVER: - *state = AUDIT_DISABLED; + *state = AUDIT_STATE_DISABLED; break; case AUDIT_ALWAYS: - *state = AUDIT_RECORD_CONTEXT; + *state = AUDIT_STATE_RECORD; break; } return 1; @@ -773,14 +779,14 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key) list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) { if (audit_filter_rules(tsk, &e->rule, NULL, NULL, &state, true)) { - if (state == AUDIT_RECORD_CONTEXT) + if (state == AUDIT_STATE_RECORD) *key = kstrdup(e->rule.filterkey, GFP_ATOMIC); rcu_read_unlock(); return state; } } rcu_read_unlock(); - return AUDIT_BUILD_CONTEXT; + return AUDIT_STATE_BUILD; } static int audit_in_mask(const struct audit_krule *rule, unsigned long val) @@ -802,7 +808,7 @@ static int audit_in_mask(const struct audit_krule *rule, unsigned long val) /* At syscall exit time, this filter is called if the audit_state is * not low enough that auditing cannot take place, but is also not * high enough that we already know we have to write an audit record - * (i.e., the state is AUDIT_SETUP_CONTEXT or AUDIT_BUILD_CONTEXT). + * (i.e., the state is AUDIT_STATE_BUILD). */ static void audit_filter_syscall(struct task_struct *tsk, struct audit_context *ctx) @@ -923,7 +929,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state) if (!context) return NULL; context->state = state; - context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; + context->prio = state == AUDIT_STATE_RECORD ? ~0ULL : 0; INIT_LIST_HEAD(&context->killed_trees); INIT_LIST_HEAD(&context->names_list); context->fds[0] = -1; @@ -950,7 +956,7 @@ int audit_alloc(struct task_struct *tsk) return 0; /* Return if not auditing. 
*/ state = audit_filter_task(tsk, &key); - if (state == AUDIT_DISABLED) { + if (state == AUDIT_STATE_DISABLED) { clear_task_syscall_work(tsk, SYSCALL_AUDIT); return 0; } @@ -1225,6 +1231,7 @@ static void show_special(struct audit_context *context, int *call_panic) switch (context->type) { case AUDIT_SOCKETCALL: { int nargs = context->socketcall.nargs; + audit_log_format(ab, "nargs=%d", nargs); for (i = 0; i < nargs; i++) audit_log_format(ab, " a%d=%lx", i, @@ -1240,6 +1247,7 @@ static void show_special(struct audit_context *context, int *call_panic) if (osid) { char *ctx = NULL; u32 len; + if (security_secid_to_secctx(osid, &ctx, &len)) { audit_log_format(ab, " osid=%u", osid); *call_panic = 1; @@ -1289,6 +1297,7 @@ static void show_special(struct audit_context *context, int *call_panic) break; case AUDIT_MQ_GETSETATTR: { struct mq_attr *attr = &context->mq_getsetattr.mqstat; + audit_log_format(ab, "mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld " "mq_curmsgs=%ld ", @@ -1325,6 +1334,7 @@ static void show_special(struct audit_context *context, int *call_panic) static inline int audit_proctitle_rtrim(char *proctitle, int len) { char *end = proctitle + len - 1; + while (end > proctitle && !isprint(*end)) end--; @@ -1513,6 +1523,7 @@ static void audit_log_exit(void) case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; + audit_log_format(ab, "fver=%x", axs->fcap_ver); audit_log_cap(ab, "fp", &axs->fcap.permitted); audit_log_cap(ab, "fi", &axs->fcap.inheritable); @@ -1628,7 +1639,7 @@ void __audit_free(struct task_struct *tsk) audit_filter_syscall(tsk, context); audit_filter_inodes(tsk, context); - if (context->current_state == AUDIT_RECORD_CONTEXT) + if (context->current_state == AUDIT_STATE_RECORD) audit_log_exit(); } @@ -1647,7 +1658,7 @@ void __audit_free(struct task_struct *tsk) * Fill in audit context at syscall entry. This only happens if the * audit context was created when the task was created and the state or * filters demand the audit context be built. If the state from the - * per-task filter or from the per-syscall filter is AUDIT_RECORD_CONTEXT, + * per-task filter or from the per-syscall filter is AUDIT_STATE_RECORD, * then the record will be written at syscall exit time (otherwise, it * will only be written if another part of the kernel requests that it * be written). @@ -1664,11 +1675,11 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, BUG_ON(context->in_syscall || context->name_count); state = context->state; - if (state == AUDIT_DISABLED) + if (state == AUDIT_STATE_DISABLED) return; context->dummy = !audit_n_rules; - if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { + if (!context->dummy && state == AUDIT_STATE_BUILD) { context->prio = 0; if (auditd_test_task(current)) return; @@ -1693,7 +1704,7 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, * @return_code: return value of the syscall * * Tear down after system call. If the audit context has been marked as - * auditable (either because of the AUDIT_RECORD_CONTEXT state from + * auditable (either because of the AUDIT_STATE_RECORD state from * filtering, or because some other part of the kernel wrote an audit * message), then write out the syscall information. In call cases, * free the names stored from getname(). 
@@ -1735,12 +1746,12 @@ void __audit_syscall_exit(int success, long return_code) audit_filter_syscall(current, context); audit_filter_inodes(current, context); - if (context->current_state == AUDIT_RECORD_CONTEXT) + if (context->current_state == AUDIT_STATE_RECORD) audit_log_exit(); } context->in_syscall = 0; - context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; + context->prio = context->state == AUDIT_STATE_RECORD ? ~0ULL : 0; audit_free_module(context); audit_free_names(context); @@ -1753,7 +1764,7 @@ void __audit_syscall_exit(int success, long return_code) context->sockaddr_len = 0; context->type = 0; context->fds[0] = -1; - if (context->state != AUDIT_RECORD_CONTEXT) { + if (context->state != AUDIT_STATE_RECORD) { kfree(context->filterkey); context->filterkey = NULL; } @@ -1765,6 +1776,7 @@ static inline void handle_one(const struct inode *inode) struct audit_tree_refs *p; struct audit_chunk *chunk; int count; + if (likely(!inode->i_fsnotify_marks)) return; context = audit_context(); @@ -1806,8 +1818,10 @@ retry: seq = read_seqbegin(&rename_lock); for(;;) { struct inode *inode = d_backing_inode(d); + if (inode && unlikely(inode->i_fsnotify_marks)) { struct audit_chunk *chunk; + chunk = audit_tree_lookup(inode); if (chunk) { if (unlikely(!put_tree_ref(context, chunk))) { @@ -2203,7 +2217,7 @@ int auditsc_get_stamp(struct audit_context *ctx, *serial = ctx->serial; if (!ctx->prio) { ctx->prio = 1; - ctx->current_state = AUDIT_RECORD_CONTEXT; + ctx->current_state = AUDIT_STATE_RECORD; } return 1; } @@ -2285,6 +2299,7 @@ void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { struct audit_context *context = audit_context(); + context->mq_getsetattr.mqdes = mqdes; context->mq_getsetattr.mqstat = *mqstat; context->type = AUDIT_MQ_GETSETATTR; @@ -2298,6 +2313,7 @@ void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) void __audit_ipc_obj(struct kern_ipc_perm *ipcp) { struct audit_context *context = audit_context(); + context->ipc.uid = ipcp->uid; context->ipc.gid = ipcp->gid; context->ipc.mode = ipcp->mode; @@ -2362,6 +2378,7 @@ int __audit_socketcall(int nargs, unsigned long *args) void __audit_fd_pair(int fd1, int fd2) { struct audit_context *context = audit_context(); + context->fds[0] = fd1; context->fds[1] = fd2; } @@ -2379,6 +2396,7 @@ int __audit_sockaddr(int len, void *a) if (!context->sockaddr) { void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL); + if (!p) return -ENOMEM; context->sockaddr = p; @@ -2510,6 +2528,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, void __audit_log_capset(const struct cred *new, const struct cred *old) { struct audit_context *context = audit_context(); + context->capset.pid = task_tgid_nr(current); context->capset.cap.effective = new->cap_effective; context->capset.cap.inheritable = new->cap_effective; @@ -2521,6 +2540,7 @@ void __audit_log_capset(const struct cred *new, const struct cred *old) void __audit_mmap_fd(int fd, int flags) { struct audit_context *context = audit_context(); + context->mmap.fd = fd; context->mmap.flags = flags; context->type = AUDIT_MMAP; @@ -2686,6 +2706,7 @@ void audit_seccomp_actions_logged(const char *names, const char *old_names, struct list_head *audit_killed_trees(void) { struct audit_context *ctx = audit_context(); + if (likely(!ctx || !ctx->in_syscall)) return NULL; return &ctx->killed_trees; diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c index 
2921ca39a93e..96ceed0e0fb5 100644 --- a/kernel/bpf/bpf_inode_storage.c +++ b/kernel/bpf/bpf_inode_storage.c @@ -72,7 +72,7 @@ void bpf_inode_storage_free(struct inode *inode) return; } - /* Netiher the bpf_prog nor the bpf-map's syscall + /* Neither the bpf_prog nor the bpf-map's syscall * could be modifying the local_storage->list now. * Thus, no elem can be added-to or deleted-from the * local_storage->list by the bpf_prog or by the bpf-map's syscall. diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 931870f9cf56..2d4fbdbb194e 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -473,15 +473,16 @@ bool bpf_link_is_iter(struct bpf_link *link) return link->ops == &bpf_iter_link_lops; } -int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, + struct bpf_prog *prog) { - union bpf_iter_link_info __user *ulinfo; struct bpf_link_primer link_primer; struct bpf_iter_target_info *tinfo; union bpf_iter_link_info linfo; struct bpf_iter_link *link; u32 prog_btf_id, linfo_len; bool existed = false; + bpfptr_t ulinfo; int err; if (attr->link_create.target_fd || attr->link_create.flags) @@ -489,18 +490,18 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) memset(&linfo, 0, sizeof(union bpf_iter_link_info)); - ulinfo = u64_to_user_ptr(attr->link_create.iter_info); + ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel); linfo_len = attr->link_create.iter_info_len; - if (!ulinfo ^ !linfo_len) + if (bpfptr_is_null(ulinfo) ^ !linfo_len) return -EINVAL; - if (ulinfo) { + if (!bpfptr_is_null(ulinfo)) { err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo), linfo_len); if (err) return err; linfo_len = min_t(u32, linfo_len, sizeof(linfo)); - if (copy_from_user(&linfo, ulinfo, linfo_len)) + if (copy_from_bpfptr(&linfo, ulinfo, linfo_len)) return -EFAULT; } diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index da471bf01b97..06062370c3b8 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -127,7 +127,7 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } /* The set of hooks which are called without pagefaults disabled and are allowed - * to "sleep" and thus can be used for sleeable BPF programs. + * to "sleep" and thus can be used for sleepable BPF programs. */ BTF_SET_START(sleepable_lsm_hooks) BTF_ID(func, bpf_lsm_bpf) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index f982a9f0dbc4..cb4b72997d9b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -51,7 +51,7 @@ * The BTF type section contains a list of 'struct btf_type' objects. * Each one describes a C type. Recall from the above section * that a 'struct btf_type' object could be immediately followed by extra - * data in order to desribe some particular C types. + * data in order to describe some particular C types. * * type_id: * ~~~~~~~ @@ -1143,7 +1143,7 @@ static void *btf_show_obj_safe(struct btf_show *show, /* * We need a new copy to our safe object, either because we haven't - * yet copied and are intializing safe data, or because the data + * yet copied and are initializing safe data, or because the data * we want falls outside the boundaries of the safe object. */ if (!safe) { @@ -3417,7 +3417,7 @@ static struct btf_kind_operations func_proto_ops = { * BTF_KIND_FUNC_PROTO cannot be directly referred by * a struct's member. * - * It should be a funciton pointer instead. + * It should be a function pointer instead. * (i.e. 
struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) * * Hence, there is no btf_func_check_member(). @@ -4257,7 +4257,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env) return 0; } -static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, +static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, u32 log_level, char __user *log_ubuf, u32 log_size) { struct btf_verifier_env *env = NULL; @@ -4306,7 +4306,7 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, btf->data = data; btf->data_size = btf_data_size; - if (copy_from_user(data, btf_data, btf_data_size)) { + if (copy_from_bpfptr(data, btf_data, btf_data_size)) { err = -EFAULT; goto errout; } @@ -5792,12 +5792,12 @@ static int __btf_new_fd(struct btf *btf) return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); } -int btf_new_fd(const union bpf_attr *attr) +int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr) { struct btf *btf; int ret; - btf = btf_parse(u64_to_user_ptr(attr->btf), + btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel), attr->btf_size, attr->btf_log_level, u64_to_user_ptr(attr->btf_log_buf), attr->btf_log_size); @@ -6097,3 +6097,65 @@ struct module *btf_try_get_module(const struct btf *btf) return res; } + +BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) +{ + struct btf *btf; + long ret; + + if (flags) + return -EINVAL; + + if (name_sz <= 1 || name[name_sz - 1]) + return -EINVAL; + + btf = bpf_get_btf_vmlinux(); + if (IS_ERR(btf)) + return PTR_ERR(btf); + + ret = btf_find_by_name_kind(btf, name, kind); + /* ret is never zero, since btf_find_by_name_kind returns + * positive btf_id or negative error. + */ + if (ret < 0) { + struct btf *mod_btf; + int id; + + /* If name is not found in vmlinux's BTF then search in module's BTFs */ + spin_lock_bh(&btf_idr_lock); + idr_for_each_entry(&btf_idr, mod_btf, id) { + if (!btf_is_module(mod_btf)) + continue; + /* linear search could be slow hence unlock/lock + * the IDR to avoiding holding it for too long + */ + btf_get(mod_btf); + spin_unlock_bh(&btf_idr_lock); + ret = btf_find_by_name_kind(mod_btf, name, kind); + if (ret > 0) { + int btf_obj_fd; + + btf_obj_fd = __btf_new_fd(mod_btf); + if (btf_obj_fd < 0) { + btf_put(mod_btf); + return btf_obj_fd; + } + return ret | (((u64)btf_obj_fd) << 32); + } + spin_lock_bh(&btf_idr_lock); + btf_put(mod_btf); + } + spin_unlock_bh(&btf_idr_lock); + } + return ret; +} + +const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { + .func = bpf_btf_find_by_name_kind, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_ANYTHING, +}; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 5e31ee9f7512..034ad93a1ad7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1392,29 +1392,54 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) select_insn: goto *jumptable[insn->code]; - /* ALU */ -#define ALU(OPCODE, OP) \ - ALU64_##OPCODE##_X: \ - DST = DST OP SRC; \ - CONT; \ - ALU_##OPCODE##_X: \ - DST = (u32) DST OP (u32) SRC; \ - CONT; \ - ALU64_##OPCODE##_K: \ - DST = DST OP IMM; \ - CONT; \ - ALU_##OPCODE##_K: \ - DST = (u32) DST OP (u32) IMM; \ + /* Explicitly mask the register-based shift amounts with 63 or 31 + * to avoid undefined behavior. 
Normally this won't affect the + * generated code, for example, in case of native 64 bit archs such + * as x86-64 or arm64, the compiler is optimizing the AND away for + * the interpreter. In case of JITs, each of the JIT backends compiles + * the BPF shift operations to machine instructions which produce + * implementation-defined results in such a case; the resulting + * contents of the register may be arbitrary, but program behaviour + * as a whole remains defined. In other words, in case of JIT backends, + * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. + */ + /* ALU (shifts) */ +#define SHT(OPCODE, OP) \ + ALU64_##OPCODE##_X: \ + DST = DST OP (SRC & 63); \ + CONT; \ + ALU_##OPCODE##_X: \ + DST = (u32) DST OP ((u32) SRC & 31); \ + CONT; \ + ALU64_##OPCODE##_K: \ + DST = DST OP IMM; \ + CONT; \ + ALU_##OPCODE##_K: \ + DST = (u32) DST OP (u32) IMM; \ + CONT; + /* ALU (rest) */ +#define ALU(OPCODE, OP) \ + ALU64_##OPCODE##_X: \ + DST = DST OP SRC; \ + CONT; \ + ALU_##OPCODE##_X: \ + DST = (u32) DST OP (u32) SRC; \ + CONT; \ + ALU64_##OPCODE##_K: \ + DST = DST OP IMM; \ + CONT; \ + ALU_##OPCODE##_K: \ + DST = (u32) DST OP (u32) IMM; \ CONT; - ALU(ADD, +) ALU(SUB, -) ALU(AND, &) ALU(OR, |) - ALU(LSH, <<) - ALU(RSH, >>) ALU(XOR, ^) ALU(MUL, *) + SHT(LSH, <<) + SHT(RSH, >>) +#undef SHT #undef ALU ALU_NEG: DST = (u32) -DST; @@ -1439,13 +1464,13 @@ select_insn: insn++; CONT; ALU_ARSH_X: - DST = (u64) (u32) (((s32) DST) >> SRC); + DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); CONT; ALU_ARSH_K: DST = (u64) (u32) (((s32) DST) >> IMM); CONT; ALU64_ARSH_X: - (*(s64 *) &DST) >>= SRC; + (*(s64 *) &DST) >>= (SRC & 63); CONT; ALU64_ARSH_K: (*(s64 *) &DST) >>= IMM; diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 5dd3e866599a..480e936c54d0 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -74,7 +74,7 @@ struct bpf_cpu_map_entry { struct bpf_cpu_map { struct bpf_map map; /* Below members specific for map type */ - struct bpf_cpu_map_entry **cpu_map; + struct bpf_cpu_map_entry __rcu **cpu_map; }; static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list); @@ -469,7 +469,7 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap, { struct bpf_cpu_map_entry *old_rcpu; - old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu); + old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu))); if (old_rcpu) { call_rcu(&old_rcpu->rcu, __cpu_map_entry_free); INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop); @@ -551,7 +551,7 @@ static void cpu_map_free(struct bpf_map *map) for (i = 0; i < cmap->map.max_entries; i++) { struct bpf_cpu_map_entry *rcpu; - rcpu = READ_ONCE(cmap->cpu_map[i]); + rcpu = rcu_dereference_raw(cmap->cpu_map[i]); if (!rcpu) continue; @@ -562,6 +562,10 @@ static void cpu_map_free(struct bpf_map *map) kfree(cmap); } +/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or + * by local_bh_disable() (from XDP calls inside NAPI). The + * rcu_read_lock_bh_held() below makes lockdep accept both. 
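The interpreter comment above can be illustrated outside the kernel. A small plain-C sketch (illustrative helper names, not kernel code) of why the register-based shift amount is masked while the immediate (_K) cases are left alone; immediate shift amounts are already range-checked at program load time:

/* In C, shifting a 64-bit value by 64 or more (or a 32-bit value by 32 or
 * more) is undefined behaviour, and the amount in SRC is an arbitrary
 * runtime value.  Masking with 63/31 keeps the expression defined without
 * changing the result for in-range amounts; JITs instead emit the native
 * shift instruction and must not add the mask.
 */
#include <stdint.h>

static inline uint64_t alu64_lsh(uint64_t dst, uint64_t src)
{
	return dst << (src & 63);	/* defined for any src */
}

static inline uint32_t alu32_rsh(uint32_t dst, uint32_t src)
{
	return dst >> (src & 31);	/* defined for any src */
}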
+ */ static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) { struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); @@ -570,7 +574,8 @@ static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) if (key >= map->max_entries) return NULL; - rcpu = READ_ONCE(cmap->cpu_map[key]); + rcpu = rcu_dereference_check(cmap->cpu_map[key], + rcu_read_lock_bh_held()); return rcpu; } @@ -601,7 +606,8 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags) { - return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem); + return __bpf_xdp_redirect_map(map, ifindex, flags, 0, + __cpu_map_lookup_elem); } static int cpu_map_btf_id; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index aa516472ce46..2546dafd6672 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -57,6 +57,7 @@ struct xdp_dev_bulk_queue { struct list_head flush_node; struct net_device *dev; struct net_device *dev_rx; + struct bpf_prog *xdp_prog; unsigned int count; }; @@ -72,7 +73,7 @@ struct bpf_dtab_netdev { struct bpf_dtab { struct bpf_map map; - struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */ + struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */ struct list_head list; /* these are only used for DEVMAP_HASH type maps */ @@ -92,7 +93,7 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries, int i; struct hlist_head *hash; - hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); + hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node); if (hash != NULL) for (i = 0; i < entries; i++) INIT_HLIST_HEAD(&hash[i]); @@ -143,7 +144,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) spin_lock_init(&dtab->index_lock); } else { - dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * + dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *), dtab->map.numa_node); if (!dtab->netdev_map) @@ -197,6 +198,7 @@ static void dev_map_free(struct bpf_map *map) list_del_rcu(&dtab->list); spin_unlock(&dev_map_lock); + bpf_clear_redirect_map(map); synchronize_rcu(); /* Make sure prior __dev_map_entry_free() have completed. */ @@ -224,7 +226,7 @@ static void dev_map_free(struct bpf_map *map) for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; - dev = dtab->netdev_map[i]; + dev = rcu_dereference_raw(dtab->netdev_map[i]); if (!dev) continue; @@ -257,6 +259,10 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } +/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or + * by local_bh_disable() (from XDP calls inside NAPI). The + * rcu_read_lock_bh_held() below makes lockdep accept both. 
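The cpumap comment above describes a pattern repeated across several maps in this series. A condensed kernel-style sketch with made-up type names (demo_map/demo_entry are illustrative, not from the tree):

/* The __rcu slot may be read either under rcu_read_lock() (syscall path)
 * or with softirqs disabled inside NAPI (XDP path); the second argument
 * to rcu_dereference_check() keeps lockdep happy in the latter case.
 */
#include <linux/types.h>
#include <linux/rcupdate.h>

struct demo_entry;

struct demo_map {
	struct demo_entry __rcu **slots;
	u32 max_entries;
};

static struct demo_entry *demo_lookup(struct demo_map *map, u32 key)
{
	if (key >= map->max_entries)
		return NULL;

	return rcu_dereference_check(map->slots[key],
				     rcu_read_lock_bh_held());
}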
+ */ static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); @@ -326,22 +332,69 @@ bool dev_map_can_have_prog(struct bpf_map *map) return false; } +static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog, + struct xdp_frame **frames, int n, + struct net_device *dev) +{ + struct xdp_txq_info txq = { .dev = dev }; + struct xdp_buff xdp; + int i, nframes = 0; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + u32 act; + int err; + + xdp_convert_frame_to_buff(xdpf, &xdp); + xdp.txq = &txq; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + err = xdp_update_frame_from_buff(&xdp, xdpf); + if (unlikely(err < 0)) + xdp_return_frame_rx_napi(xdpf); + else + frames[nframes++] = xdpf; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); + fallthrough; + case XDP_DROP: + xdp_return_frame_rx_napi(xdpf); + break; + } + } + return nframes; /* sent frames count */ +} + static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) { struct net_device *dev = bq->dev; + unsigned int cnt = bq->count; int sent = 0, err = 0; + int to_send = cnt; int i; - if (unlikely(!bq->count)) + if (unlikely(!cnt)) return; - for (i = 0; i < bq->count; i++) { + for (i = 0; i < cnt; i++) { struct xdp_frame *xdpf = bq->q[i]; prefetch(xdpf); } - sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags); + if (bq->xdp_prog) { + to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev); + if (!to_send) + goto out; + } + + sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags); if (sent < 0) { /* If ndo_xdp_xmit fails with an errno, no frames have * been xmit'ed. @@ -353,37 +406,34 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags) /* If not all frames have been transmitted, it is our * responsibility to free them */ - for (i = sent; unlikely(i < bq->count); i++) + for (i = sent; unlikely(i < to_send); i++) xdp_return_frame_rx_napi(bq->q[i]); - trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err); - bq->dev_rx = NULL; +out: bq->count = 0; - __list_del_clearprev(&bq->flush_node); -} - -/* __dev_flush is called from xdp_do_flush() which _must_ be signaled - * from the driver before returning from its napi->poll() routine. The poll() - * routine is called either from busy_poll context or net_rx_action signaled - * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the - * net device can be torn down. On devmap tear down we ensure the flush list - * is empty before completing to ensure all flush operations have completed. - * When drivers update the bpf program they may need to ensure any flush ops - * are also complete. Using synchronize_rcu or call_rcu will suffice for this - * because both wait for napi context to exit. + trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err); +} + +/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the + * driver before returning from its napi->poll() routine. See the comment above + * xdp_do_flush() in filter.c. 
*/ void __dev_flush(void) { struct list_head *flush_list = this_cpu_ptr(&dev_flush_list); struct xdp_dev_bulk_queue *bq, *tmp; - list_for_each_entry_safe(bq, tmp, flush_list, flush_node) + list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { bq_xmit_all(bq, XDP_XMIT_FLUSH); + bq->dev_rx = NULL; + bq->xdp_prog = NULL; + __list_del_clearprev(&bq->flush_node); + } } -/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or - * update happens in parallel here a dev_put wont happen until after reading the - * ifindex. +/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or + * by local_bh_disable() (from XDP calls inside NAPI). The + * rcu_read_lock_bh_held() below makes lockdep accept both. */ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key) { @@ -393,15 +443,17 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key) if (key >= map->max_entries) return NULL; - obj = READ_ONCE(dtab->netdev_map[key]); + obj = rcu_dereference_check(dtab->netdev_map[key], + rcu_read_lock_bh_held()); return obj; } -/* Runs under RCU-read-side, plus in softirq under NAPI protection. - * Thus, safe percpu variable access. +/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu + * variable access, and map elements stick around. See comment above + * xdp_do_flush() in filter.c. */ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, - struct net_device *dev_rx) + struct net_device *dev_rx, struct bpf_prog *xdp_prog) { struct list_head *flush_list = this_cpu_ptr(&dev_flush_list); struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); @@ -412,18 +464,22 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, /* Ingress dev_rx will be the same for all xdp_frame's in * bulk_queue, because bq stored per-CPU and must be flushed * from net_device drivers NAPI func end. + * + * Do the same with xdp_prog and flush_list since these fields + * are only ever modified together. 
*/ - if (!bq->dev_rx) + if (!bq->dev_rx) { bq->dev_rx = dev_rx; + bq->xdp_prog = xdp_prog; + list_add(&bq->flush_node, flush_list); + } bq->q[bq->count++] = xdpf; - - if (!bq->flush_node.prev) - list_add(&bq->flush_node, flush_list); } static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, - struct net_device *dev_rx) + struct net_device *dev_rx, + struct bpf_prog *xdp_prog) { struct xdp_frame *xdpf; int err; @@ -439,55 +495,115 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, if (unlikely(!xdpf)) return -EOVERFLOW; - bq_enqueue(dev, xdpf, dev_rx); + bq_enqueue(dev, xdpf, dev_rx, xdp_prog); return 0; } -static struct xdp_buff *dev_map_run_prog(struct net_device *dev, - struct xdp_buff *xdp, - struct bpf_prog *xdp_prog) +int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, + struct net_device *dev_rx) { - struct xdp_txq_info txq = { .dev = dev }; - u32 act; + return __xdp_enqueue(dev, xdp, dev_rx, NULL); +} - xdp_set_data_meta_invalid(xdp); - xdp->txq = &txq; +int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, + struct net_device *dev_rx) +{ + struct net_device *dev = dst->dev; - act = bpf_prog_run_xdp(xdp_prog, xdp); - switch (act) { - case XDP_PASS: - return xdp; - case XDP_DROP: - break; - default: - bpf_warn_invalid_xdp_action(act); - fallthrough; - case XDP_ABORTED: - trace_xdp_exception(dev, xdp_prog, act); - break; - } + return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog); +} - xdp_return_buff(xdp); - return NULL; +static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp, + int exclude_ifindex) +{ + if (!obj || obj->dev->ifindex == exclude_ifindex || + !obj->dev->netdev_ops->ndo_xdp_xmit) + return false; + + if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data)) + return false; + + return true; } -int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, - struct net_device *dev_rx) +static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj, + struct net_device *dev_rx, + struct xdp_frame *xdpf) { - return __xdp_enqueue(dev, xdp, dev_rx); + struct xdp_frame *nxdpf; + + nxdpf = xdpf_clone(xdpf); + if (!nxdpf) + return -ENOMEM; + + bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog); + + return 0; } -int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, - struct net_device *dev_rx) +int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx, + struct bpf_map *map, bool exclude_ingress) { - struct net_device *dev = dst->dev; + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + int exclude_ifindex = exclude_ingress ? 
dev_rx->ifindex : 0; + struct bpf_dtab_netdev *dst, *last_dst = NULL; + struct hlist_head *head; + struct xdp_frame *xdpf; + unsigned int i; + int err; - if (dst->xdp_prog) { - xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog); - if (!xdp) - return 0; + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + return -EOVERFLOW; + + if (map->map_type == BPF_MAP_TYPE_DEVMAP) { + for (i = 0; i < map->max_entries; i++) { + dst = READ_ONCE(dtab->netdev_map[i]); + if (!is_valid_dst(dst, xdp, exclude_ifindex)) + continue; + + /* we only need n-1 clones; last_dst enqueued below */ + if (!last_dst) { + last_dst = dst; + continue; + } + + err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf); + if (err) + return err; + + last_dst = dst; + } + } else { /* BPF_MAP_TYPE_DEVMAP_HASH */ + for (i = 0; i < dtab->n_buckets; i++) { + head = dev_map_index_hash(dtab, i); + hlist_for_each_entry_rcu(dst, head, index_hlist, + lockdep_is_held(&dtab->index_lock)) { + if (!is_valid_dst(dst, xdp, exclude_ifindex)) + continue; + + /* we only need n-1 clones; last_dst enqueued below */ + if (!last_dst) { + last_dst = dst; + continue; + } + + err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf); + if (err) + return err; + + last_dst = dst; + } + } } - return __xdp_enqueue(dev, xdp, dev_rx); + + /* consume the last copy of the frame */ + if (last_dst) + bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog); + else + xdp_return_frame_rx_napi(xdpf); /* dtab is empty */ + + return 0; } int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, @@ -504,6 +620,87 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, return 0; } +static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst, + struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + struct sk_buff *nskb; + int err; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + err = dev_map_generic_redirect(dst, nskb, xdp_prog); + if (unlikely(err)) { + consume_skb(nskb); + return err; + } + + return 0; +} + +int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, + struct bpf_prog *xdp_prog, struct bpf_map *map, + bool exclude_ingress) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + int exclude_ifindex = exclude_ingress ? 
dev->ifindex : 0; + struct bpf_dtab_netdev *dst, *last_dst = NULL; + struct hlist_head *head; + struct hlist_node *next; + unsigned int i; + int err; + + if (map->map_type == BPF_MAP_TYPE_DEVMAP) { + for (i = 0; i < map->max_entries; i++) { + dst = READ_ONCE(dtab->netdev_map[i]); + if (!dst || dst->dev->ifindex == exclude_ifindex) + continue; + + /* we only need n-1 clones; last_dst enqueued below */ + if (!last_dst) { + last_dst = dst; + continue; + } + + err = dev_map_redirect_clone(last_dst, skb, xdp_prog); + if (err) + return err; + + last_dst = dst; + } + } else { /* BPF_MAP_TYPE_DEVMAP_HASH */ + for (i = 0; i < dtab->n_buckets; i++) { + head = dev_map_index_hash(dtab, i); + hlist_for_each_entry_safe(dst, next, head, index_hlist) { + if (!dst || dst->dev->ifindex == exclude_ifindex) + continue; + + /* we only need n-1 clones; last_dst enqueued below */ + if (!last_dst) { + last_dst = dst; + continue; + } + + err = dev_map_redirect_clone(last_dst, skb, xdp_prog); + if (err) + return err; + + last_dst = dst; + } + } + } + + /* consume the first skb and return */ + if (last_dst) + return dev_map_generic_redirect(last_dst, skb, xdp_prog); + + /* dtab is empty */ + consume_skb(skb); + return 0; +} + static void *dev_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); @@ -538,14 +735,7 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key) if (k >= map->max_entries) return -EINVAL; - /* Use call_rcu() here to ensure any rcu critical sections have - * completed as well as any flush operations because call_rcu - * will wait for preempt-disable region to complete, NAPI in this - * context. And additionally, the driver tear down ensures all - * soft irqs are complete before removing the net device in the - * case of dev_put equals zero. - */ - old_dev = xchg(&dtab->netdev_map[k], NULL); + old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL)); if (old_dev) call_rcu(&old_dev->rcu, __dev_map_entry_free); return 0; @@ -654,7 +844,7 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map, * Remembering the driver side flush operation will happen before the * net device is removed. 
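The devmap changes above add multicast support behind two new map-redirect flags. A hedged XDP-side sketch of how a program would use them; the map name and sizing are illustrative and libbpf's bpf_helpers.h is assumed:

/* Redirect every packet to all interfaces stored in "forward_map" except
 * the one it arrived on.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 32);
} forward_map SEC(".maps");

SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
	/* the key argument is ignored when BPF_F_BROADCAST is set */
	return bpf_redirect_map(&forward_map, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";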
*/ - old_dev = xchg(&dtab->netdev_map[i], dev); + old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev))); if (old_dev) call_rcu(&old_dev->rcu, __dev_map_entry_free); @@ -730,12 +920,16 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags) { - return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem); + return __bpf_xdp_redirect_map(map, ifindex, flags, + BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS, + __dev_map_lookup_elem); } static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags) { - return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem); + return __bpf_xdp_redirect_map(map, ifindex, flags, + BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS, + __dev_map_hash_lookup_elem); } static int dev_map_btf_id; @@ -830,10 +1024,10 @@ static int dev_map_notification(struct notifier_block *notifier, for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev, *odev; - dev = READ_ONCE(dtab->netdev_map[i]); + dev = rcu_dereference(dtab->netdev_map[i]); if (!dev || netdev != dev->dev) continue; - odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); + odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL)); if (dev == odev) call_rcu(&dev->rcu, __dev_map_entry_free); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index d7ebb12ffffc..72c58cc516a3 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -46,12 +46,12 @@ * events, kprobes and tracing to be invoked before the prior invocation * from one of these contexts completed. sys_bpf() uses the same mechanism * by pinning the task to the current CPU and incrementing the recursion - * protection accross the map operation. + * protection across the map operation. * * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain * operations like memory allocations (even with GFP_ATOMIC) from atomic * contexts. This is required because even with GFP_ATOMIC the memory - * allocator calls into code pathes which acquire locks with long held lock + * allocator calls into code paths which acquire locks with long held lock * sections. To ensure the deterministic behaviour these locks are regular * spinlocks, which are converted to 'sleepable' spinlocks on RT. 
The only * true atomic contexts on an RT kernel are the low level hardware @@ -596,7 +596,8 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) struct htab_elem *l; u32 hash, key_size; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -989,7 +990,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, /* unknown flags */ return -EINVAL; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -1082,7 +1084,8 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, /* unknown flags */ return -EINVAL; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -1148,7 +1151,8 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, /* unknown flags */ return -EINVAL; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -1202,7 +1206,8 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, /* unknown flags */ return -EINVAL; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -1276,7 +1281,8 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) u32 hash, key_size; int ret; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -1311,7 +1317,8 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) u32 hash, key_size; int ret; - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); key_size = map->key_size; @@ -1401,6 +1408,100 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } +static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, + void *value, bool is_lru_map, + bool is_percpu, u64 flags) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct hlist_nulls_head *head; + unsigned long bflags; + struct htab_elem *l; + u32 hash, key_size; + struct bucket *b; + int ret; + + key_size = map->key_size; + + hash = htab_map_hash(key, key_size, htab->hashrnd); + b = __select_bucket(htab, hash); + head = &b->head; + + ret = htab_lock_bucket(htab, b, hash, &bflags); + if (ret) + return ret; + + l = lookup_elem_raw(head, hash, key, key_size); + if (!l) { + ret = -ENOENT; + } else { + if (is_percpu) { + u32 roundup_value_size = round_up(map->value_size, 8); + void __percpu *pptr; + int off = 0, cpu; + + pptr = htab_elem_get_ptr(l, key_size); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(value + off, + per_cpu_ptr(pptr, cpu), + roundup_value_size); + off += roundup_value_size; + } + } else { + u32 roundup_key_size = round_up(map->key_size, 8); + + if (flags & BPF_F_LOCK) + copy_map_value_locked(map, value, l->key + 
+ roundup_key_size, + true); + else + copy_map_value(map, value, l->key + + roundup_key_size); + check_and_init_map_lock(map, value); + } + + hlist_nulls_del_rcu(&l->hash_node); + if (!is_lru_map) + free_htab_elem(htab, l); + } + + htab_unlock_bucket(htab, b, hash, bflags); + + if (is_lru_map && l) + bpf_lru_push_free(&htab->lru, &l->lru_node); + + return ret; +} + +static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + return __htab_map_lookup_and_delete_elem(map, key, value, false, false, + flags); +} + +static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map, + void *key, void *value, + u64 flags) +{ + return __htab_map_lookup_and_delete_elem(map, key, value, false, true, + flags); +} + +static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + return __htab_map_lookup_and_delete_elem(map, key, value, true, false, + flags); +} + +static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map, + void *key, void *value, + u64 flags) +{ + return __htab_map_lookup_and_delete_elem(map, key, value, true, true, + flags); +} + static int __htab_map_lookup_and_delete_batch(struct bpf_map *map, const union bpf_attr *attr, @@ -1934,6 +2035,7 @@ const struct bpf_map_ops htab_map_ops = { .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, .map_lookup_elem = htab_map_lookup_elem, + .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem, .map_update_elem = htab_map_update_elem, .map_delete_elem = htab_map_delete_elem, .map_gen_lookup = htab_map_gen_lookup, @@ -1954,6 +2056,7 @@ const struct bpf_map_ops htab_lru_map_ops = { .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, .map_lookup_elem = htab_lru_map_lookup_elem, + .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem, .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys, .map_update_elem = htab_lru_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, @@ -2077,6 +2180,7 @@ const struct bpf_map_ops htab_percpu_map_ops = { .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, .map_lookup_elem = htab_percpu_map_lookup_elem, + .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem, .map_update_elem = htab_percpu_map_update_elem, .map_delete_elem = htab_map_delete_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, @@ -2096,6 +2200,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, .map_lookup_elem = htab_lru_percpu_map_lookup_elem, + .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem, .map_update_elem = htab_lru_percpu_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 73443498d88f..62cf00383910 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -14,6 +14,7 @@ #include <linux/jiffies.h> #include <linux/pid_namespace.h> #include <linux/proc_ns.h> +#include <linux/security.h> #include "../../lib/kstrtox.h" @@ -28,7 +29,7 @@ */ BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) { - WARN_ON_ONCE(!rcu_read_lock_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); return (unsigned long) map->ops->map_lookup_elem(map, key); } @@ -44,7 +45,7 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = { BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, 
void *, key, void *, value, u64, flags) { - WARN_ON_ONCE(!rcu_read_lock_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); return map->ops->map_update_elem(map, key, value, flags); } @@ -61,7 +62,7 @@ const struct bpf_func_proto bpf_map_update_elem_proto = { BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) { - WARN_ON_ONCE(!rcu_read_lock_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); return map->ops->map_delete_elem(map, key); } @@ -1069,11 +1070,13 @@ bpf_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_probe_read_user: return &bpf_probe_read_user_proto; case BPF_FUNC_probe_read_kernel: - return &bpf_probe_read_kernel_proto; + return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + NULL : &bpf_probe_read_kernel_proto; case BPF_FUNC_probe_read_user_str: return &bpf_probe_read_user_str_proto; case BPF_FUNC_probe_read_kernel_str: - return &bpf_probe_read_kernel_str_proto; + return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + NULL : &bpf_probe_read_kernel_str_proto; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; case BPF_FUNC_snprintf: diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index b4ebd60a6c16..80da1db47c68 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -543,7 +543,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags) return PTR_ERR(raw); if (type == BPF_TYPE_PROG) - ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw); + ret = bpf_prog_new_fd(raw); else if (type == BPF_TYPE_MAP) ret = bpf_map_new_fd(raw, f_flags); else if (type == BPF_TYPE_LINK) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 1b7b8a6f34ee..423549d2c52e 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -232,7 +232,8 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key) /* Start walking the trie from the root node ... */ - for (node = rcu_dereference(trie->root); node;) { + for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held()); + node;) { unsigned int next_bit; size_t matchlen; @@ -264,7 +265,8 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key) * traverse down. */ next_bit = extract_bit(key->data, node->prefixlen); - node = rcu_dereference(node->child[next_bit]); + node = rcu_dereference_check(node->child[next_bit], + rcu_read_lock_bh_held()); } if (!found) diff --git a/kernel/bpf/preload/iterators/iterators.bpf.c b/kernel/bpf/preload/iterators/iterators.bpf.c index 52aa7b38e8b8..03af863314ea 100644 --- a/kernel/bpf/preload/iterators/iterators.bpf.c +++ b/kernel/bpf/preload/iterators/iterators.bpf.c @@ -2,7 +2,6 @@ /* Copyright (c) 2020 Facebook */ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> -#include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> #pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 4838922f723d..93a55391791a 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -102,7 +102,7 @@ static void reuseport_array_free(struct bpf_map *map) /* * ops->map_*_elem() will not be able to access this * array now. Hence, this function only races with - * bpf_sk_reuseport_detach() which was triggerred by + * bpf_sk_reuseport_detach() which was triggered by * close() or disconnect(). 
* * This function and bpf_sk_reuseport_detach() are diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 84b3b35fc0d0..9e0c10c6892a 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -8,6 +8,7 @@ #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/poll.h> +#include <linux/kmemleak.h> #include <uapi/linux/btf.h> #define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE) @@ -105,6 +106,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node) rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, VM_ALLOC | VM_USERMAP, PAGE_KERNEL); if (rb) { + kmemleak_not_leak(pages); rb->pages = pages; rb->nr_pages = nr_pages; return rb; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ea04b0deb5ce..e343f158e556 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -73,11 +73,10 @@ static const struct bpf_map_ops * const bpf_map_types[] = { * copy_from_user() call. However, this is not a concern since this function is * meant to be a future-proofing of bits. */ -int bpf_check_uarg_tail_zero(void __user *uaddr, +int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, size_t actual_size) { - unsigned char __user *addr = uaddr + expected_size; int res; if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ @@ -86,7 +85,12 @@ int bpf_check_uarg_tail_zero(void __user *uaddr, if (actual_size <= expected_size) return 0; - res = check_zeroed_user(addr, actual_size - expected_size); + if (uaddr.is_kernel) + res = memchr_inv(uaddr.kernel + expected_size, 0, + actual_size - expected_size) == NULL; + else + res = check_zeroed_user(uaddr.user + expected_size, + actual_size - expected_size); if (res < 0) return res; return res ? 0 : -E2BIG; @@ -1005,6 +1009,17 @@ static void *__bpf_copy_key(void __user *ukey, u64 key_size) return NULL; } +static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size) +{ + if (key_size) + return memdup_bpfptr(ukey, key_size); + + if (!bpfptr_is_null(ukey)) + return ERR_PTR(-EINVAL); + + return NULL; +} + /* last field in 'union bpf_attr' used by this command */ #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags @@ -1075,10 +1090,10 @@ err_put: #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags -static int map_update_elem(union bpf_attr *attr) +static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) { - void __user *ukey = u64_to_user_ptr(attr->key); - void __user *uvalue = u64_to_user_ptr(attr->value); + bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); + bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); int ufd = attr->map_fd; struct bpf_map *map; void *key, *value; @@ -1104,7 +1119,7 @@ static int map_update_elem(union bpf_attr *attr) goto err_put; } - key = __bpf_copy_key(ukey, map->key_size); + key = ___bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; @@ -1124,7 +1139,7 @@ static int map_update_elem(union bpf_attr *attr) goto free_key; err = -EFAULT; - if (copy_from_user(value, uvalue, value_size) != 0) + if (copy_from_bpfptr(value, uvalue, value_size) != 0) goto free_value; err = bpf_map_update_value(map, f, key, value, attr->flags); @@ -1469,7 +1484,7 @@ free_buf: return err; } -#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value +#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags static int map_lookup_and_delete_elem(union bpf_attr *attr) { @@ -1485,6 +1500,9 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) return -EINVAL; + if (attr->flags & ~BPF_F_LOCK) + return 
-EINVAL; + f = fdget(ufd); map = __bpf_map_get(f); if (IS_ERR(map)) @@ -1495,24 +1513,47 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) goto err_put; } + if (attr->flags && + (map->map_type == BPF_MAP_TYPE_QUEUE || + map->map_type == BPF_MAP_TYPE_STACK)) { + err = -EINVAL; + goto err_put; + } + + if ((attr->flags & BPF_F_LOCK) && + !map_value_has_spin_lock(map)) { + err = -EINVAL; + goto err_put; + } + key = __bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; } - value_size = map->value_size; + value_size = bpf_map_value_size(map); err = -ENOMEM; value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); if (!value) goto free_key; + err = -ENOTSUPP; if (map->map_type == BPF_MAP_TYPE_QUEUE || map->map_type == BPF_MAP_TYPE_STACK) { err = map->ops->map_pop_elem(map, value); - } else { - err = -ENOTSUPP; + } else if (map->map_type == BPF_MAP_TYPE_HASH || + map->map_type == BPF_MAP_TYPE_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { + if (!bpf_map_is_dev_bound(map)) { + bpf_disable_instrumentation(); + rcu_read_lock(); + err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); + rcu_read_unlock(); + bpf_enable_instrumentation(); + } } if (err) @@ -1932,6 +1973,11 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) attr->expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE; break; + case BPF_PROG_TYPE_SK_REUSEPORT: + if (!attr->expected_attach_type) + attr->expected_attach_type = + BPF_SK_REUSEPORT_SELECT; + break; } } @@ -2015,6 +2061,15 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, if (expected_attach_type == BPF_SK_LOOKUP) return 0; return -EINVAL; + case BPF_PROG_TYPE_SK_REUSEPORT: + switch (expected_attach_type) { + case BPF_SK_REUSEPORT_SELECT: + case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: + return 0; + default: + return -EINVAL; + } + case BPF_PROG_TYPE_SYSCALL: case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; @@ -2074,9 +2129,9 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) } /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd +#define BPF_PROG_LOAD_LAST_FIELD fd_array -static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) +static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type type = attr->prog_type; struct bpf_prog *prog, *dst_prog = NULL; @@ -2101,8 +2156,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) return -EPERM; /* copy eBPF program license from user space */ - if (strncpy_from_user(license, u64_to_user_ptr(attr->license), - sizeof(license) - 1) < 0) + if (strncpy_from_bpfptr(license, + make_bpfptr(attr->license, uattr.is_kernel), + sizeof(license) - 1) < 0) return -EFAULT; license[sizeof(license) - 1] = 0; @@ -2186,8 +2242,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) prog->len = attr->insn_cnt; err = -EFAULT; - if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), - bpf_prog_insn_size(prog)) != 0) + if (copy_from_bpfptr(prog->insns, + make_bpfptr(attr->insns, uattr.is_kernel), + bpf_prog_insn_size(prog)) != 0) goto free_prog_sec; prog->orig_prog = NULL; @@ -3423,7 +3480,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, u32 ulen; int err; - err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); + err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); if (err) return err; 
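Much of the syscall.c churn above converts user-pointer parameters to bpfptr_t so the same command handlers can be driven either from user space or from kernel memory. A condensed sketch of that pattern using the bpfptr helpers referenced in the diff (demo_info/demo_handle are illustrative names, not kernel functions):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bpfptr.h>

struct demo_info {
	u64 target;
	u32 flags;
};

static int demo_handle(u64 info_addr, u32 info_len, bpfptr_t uattr)
{
	struct demo_info info = {};
	bpfptr_t uinfo = make_bpfptr(info_addr, uattr.is_kernel);

	/* same validation whether the attribute came from user space via
	 * sys_bpf() or from kernel memory via the bpf syscall helper
	 */
	if (bpfptr_is_null(uinfo) || info_len > sizeof(info))
		return -EINVAL;

	if (copy_from_bpfptr(&info, uinfo, info_len))
		return -EFAULT;

	/* ... act on info.target / info.flags ... */
	return 0;
}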
info_len = min_t(u32, sizeof(info), info_len); @@ -3702,7 +3759,7 @@ static int bpf_map_get_info_by_fd(struct file *file, u32 info_len = attr->info.info_len; int err; - err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); + err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); if (err) return err; info_len = min_t(u32, sizeof(info), info_len); @@ -3745,7 +3802,7 @@ static int bpf_btf_get_info_by_fd(struct file *file, u32 info_len = attr->info.info_len; int err; - err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); + err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); if (err) return err; @@ -3762,7 +3819,7 @@ static int bpf_link_get_info_by_fd(struct file *file, u32 info_len = attr->info.info_len; int err; - err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); + err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); if (err) return err; info_len = min_t(u32, sizeof(info), info_len); @@ -3825,7 +3882,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, #define BPF_BTF_LOAD_LAST_FIELD btf_log_level -static int bpf_btf_load(const union bpf_attr *attr) +static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr) { if (CHECK_ATTR(BPF_BTF_LOAD)) return -EINVAL; @@ -3833,7 +3890,7 @@ static int bpf_btf_load(const union bpf_attr *attr) if (!bpf_capable()) return -EPERM; - return btf_new_fd(attr); + return btf_new_fd(attr, uattr); } #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id @@ -4023,13 +4080,14 @@ err_put: return err; } -static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr, + struct bpf_prog *prog) { if (attr->link_create.attach_type != prog->expected_attach_type) return -EINVAL; if (prog->expected_attach_type == BPF_TRACE_ITER) - return bpf_iter_link_attach(attr, prog); + return bpf_iter_link_attach(attr, uattr, prog); else if (prog->type == BPF_PROG_TYPE_EXT) return bpf_tracing_prog_attach(prog, attr->link_create.target_fd, @@ -4038,7 +4096,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog * } #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len -static int link_create(union bpf_attr *attr) +static int link_create(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type ptype; struct bpf_prog *prog; @@ -4057,7 +4115,7 @@ static int link_create(union bpf_attr *attr) goto out; if (prog->type == BPF_PROG_TYPE_EXT) { - ret = tracing_bpf_link_attach(attr, prog); + ret = tracing_bpf_link_attach(attr, uattr, prog); goto out; } @@ -4078,7 +4136,7 @@ static int link_create(union bpf_attr *attr) ret = cgroup_bpf_link_attach(attr, prog); break; case BPF_PROG_TYPE_TRACING: - ret = tracing_bpf_link_attach(attr, prog); + ret = tracing_bpf_link_attach(attr, uattr, prog); break; case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_SK_LOOKUP: @@ -4366,7 +4424,7 @@ out_prog_put: return ret; } -SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) +static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) { union bpf_attr attr; int err; @@ -4381,7 +4439,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz /* copy attributes from user space, may be less than sizeof(bpf_attr) */ memset(&attr, 0, sizeof(attr)); - if (copy_from_user(&attr, uattr, size) != 0) + if (copy_from_bpfptr(&attr, uattr, size) != 0) return -EFAULT; err = security_bpf(cmd, &attr, size); @@ -4396,7 
+4454,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = map_lookup_elem(&attr); break; case BPF_MAP_UPDATE_ELEM: - err = map_update_elem(&attr); + err = map_update_elem(&attr, uattr); break; case BPF_MAP_DELETE_ELEM: err = map_delete_elem(&attr); @@ -4423,21 +4481,21 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_prog_detach(&attr); break; case BPF_PROG_QUERY: - err = bpf_prog_query(&attr, uattr); + err = bpf_prog_query(&attr, uattr.user); break; case BPF_PROG_TEST_RUN: - err = bpf_prog_test_run(&attr, uattr); + err = bpf_prog_test_run(&attr, uattr.user); break; case BPF_PROG_GET_NEXT_ID: - err = bpf_obj_get_next_id(&attr, uattr, + err = bpf_obj_get_next_id(&attr, uattr.user, &prog_idr, &prog_idr_lock); break; case BPF_MAP_GET_NEXT_ID: - err = bpf_obj_get_next_id(&attr, uattr, + err = bpf_obj_get_next_id(&attr, uattr.user, &map_idr, &map_idr_lock); break; case BPF_BTF_GET_NEXT_ID: - err = bpf_obj_get_next_id(&attr, uattr, + err = bpf_obj_get_next_id(&attr, uattr.user, &btf_idr, &btf_idr_lock); break; case BPF_PROG_GET_FD_BY_ID: @@ -4447,38 +4505,38 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_map_get_fd_by_id(&attr); break; case BPF_OBJ_GET_INFO_BY_FD: - err = bpf_obj_get_info_by_fd(&attr, uattr); + err = bpf_obj_get_info_by_fd(&attr, uattr.user); break; case BPF_RAW_TRACEPOINT_OPEN: err = bpf_raw_tracepoint_open(&attr); break; case BPF_BTF_LOAD: - err = bpf_btf_load(&attr); + err = bpf_btf_load(&attr, uattr); break; case BPF_BTF_GET_FD_BY_ID: err = bpf_btf_get_fd_by_id(&attr); break; case BPF_TASK_FD_QUERY: - err = bpf_task_fd_query(&attr, uattr); + err = bpf_task_fd_query(&attr, uattr.user); break; case BPF_MAP_LOOKUP_AND_DELETE_ELEM: err = map_lookup_and_delete_elem(&attr); break; case BPF_MAP_LOOKUP_BATCH: - err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH); + err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); break; case BPF_MAP_LOOKUP_AND_DELETE_BATCH: - err = bpf_map_do_batch(&attr, uattr, + err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_AND_DELETE_BATCH); break; case BPF_MAP_UPDATE_BATCH: - err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH); + err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); break; case BPF_MAP_DELETE_BATCH: - err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH); + err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); break; case BPF_LINK_CREATE: - err = link_create(&attr); + err = link_create(&attr, uattr); break; case BPF_LINK_UPDATE: err = link_update(&attr); @@ -4487,7 +4545,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_link_get_fd_by_id(&attr); break; case BPF_LINK_GET_NEXT_ID: - err = bpf_obj_get_next_id(&attr, uattr, + err = bpf_obj_get_next_id(&attr, uattr.user, &link_idr, &link_idr_lock); break; case BPF_ENABLE_STATS: @@ -4509,3 +4567,94 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz return err; } + +SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) +{ + return __sys_bpf(cmd, USER_BPFPTR(uattr), size); +} + +static bool syscall_prog_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= U16_MAX) + return false; + if (off % size != 0) + return false; + return true; +} + +BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, 
attr_size) +{ + switch (cmd) { + case BPF_MAP_CREATE: + case BPF_MAP_UPDATE_ELEM: + case BPF_MAP_FREEZE: + case BPF_PROG_LOAD: + case BPF_BTF_LOAD: + break; + /* case BPF_PROG_TEST_RUN: + * is not part of this list to prevent recursive test_run + */ + default: + return -EINVAL; + } + return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); +} + +static const struct bpf_func_proto bpf_sys_bpf_proto = { + .func = bpf_sys_bpf, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, +}; + +const struct bpf_func_proto * __weak +tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + return bpf_base_func_proto(func_id); +} + +BPF_CALL_1(bpf_sys_close, u32, fd) +{ + /* When bpf program calls this helper there should not be + * an fdget() without matching completed fdput(). + * This helper is allowed in the following callchain only: + * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close + */ + return close_fd(fd); +} + +static const struct bpf_func_proto bpf_sys_close_proto = { + .func = bpf_sys_close, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_sys_bpf: + return &bpf_sys_bpf_proto; + case BPF_FUNC_btf_find_by_name_kind: + return &bpf_btf_find_by_name_kind_proto; + case BPF_FUNC_sys_close: + return &bpf_sys_close_proto; + default: + return tracing_prog_func_proto(func_id, prog); + } +} + +const struct bpf_verifier_ops bpf_syscall_verifier_ops = { + .get_func_proto = syscall_prog_func_proto, + .is_valid_access = syscall_prog_is_valid_access, +}; + +const struct bpf_prog_ops bpf_syscall_prog_ops = { + .test_run = bpf_prog_test_run_syscall, +}; diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c index ceac5281bd31..3d7127f439a1 100644 --- a/kernel/bpf/tnum.c +++ b/kernel/bpf/tnum.c @@ -111,28 +111,31 @@ struct tnum tnum_xor(struct tnum a, struct tnum b) return TNUM(v & ~mu, mu); } -/* half-multiply add: acc += (unknown * mask * value). - * An intermediate step in the multiply algorithm. +/* Generate partial products by multiplying each bit in the multiplier (tnum a) + * with the multiplicand (tnum b), and add the partial products after + * appropriately bit-shifting them. Instead of directly performing tnum addition + * on the generated partial products, equivalenty, decompose each partial + * product into two tnums, consisting of the value-sum (acc_v) and the + * mask-sum (acc_m) and then perform tnum addition on them. The following paper + * explains the algorithm in more detail: https://arxiv.org/abs/2105.05398. 
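The comment above fully specifies the new multiplication, so it is easy to model in user space. The sketch below is illustrative only and not part of the patch: it re-implements just the tnum helpers the algorithm needs (tnum_add() and the shifts follow the kernel's existing definitions), uses a hypothetical main(), and runs one example to make the value-sum (acc_v) / mask-sum (acc_m) split concrete.

#include <stdint.h>
#include <stdio.h>

struct tnum { uint64_t value; uint64_t mask; };
#define TNUM(v, m) ((struct tnum){ .value = (v), .mask = (m) })

static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
{
        return TNUM(a.value << shift, a.mask << shift);
}

static struct tnum tnum_rshift(struct tnum a, uint8_t shift)
{
        return TNUM(a.value >> shift, a.mask >> shift);
}

/* same add as the kernel's tnum_add(): propagate uncertainty through carries */
static struct tnum tnum_add(struct tnum a, struct tnum b)
{
        uint64_t sm = a.mask + b.mask;
        uint64_t sv = a.value + b.value;
        uint64_t sigma = sm + sv;
        uint64_t chi = sigma ^ sv;
        uint64_t mu = chi | a.mask | b.mask;

        return TNUM(sv & ~mu, mu);
}

static struct tnum tnum_mul(struct tnum a, struct tnum b)
{
        uint64_t acc_v = a.value * b.value;
        struct tnum acc_m = TNUM(0, 0);

        while (a.value || a.mask) {
                if (a.value & 1)                /* certain 1: add b's mask */
                        acc_m = tnum_add(acc_m, TNUM(0, b.mask));
                else if (a.mask & 1)            /* uncertain bit: add value|mask */
                        acc_m = tnum_add(acc_m, TNUM(0, b.value | b.mask));
                a = tnum_rshift(a, 1);
                b = tnum_lshift(b, 1);
        }
        return tnum_add(TNUM(acc_v, 0), acc_m);
}

int main(void)
{
        /* {value=2, mask=1} tracks {2,3}; times the constant 3 the concrete
         * products are 6 and 9, and the tightest tnum covering both is
         * value=0, mask=0xf (the four low bits all disagree) */
        struct tnum r = tnum_mul(TNUM(2, 1), TNUM(3, 0));

        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)r.value, (unsigned long long)r.mask);
        return 0;
}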
*/ -static struct tnum hma(struct tnum acc, u64 value, u64 mask) -{ - while (mask) { - if (mask & 1) - acc = tnum_add(acc, TNUM(0, value)); - mask >>= 1; - value <<= 1; - } - return acc; -} - struct tnum tnum_mul(struct tnum a, struct tnum b) { - struct tnum acc; - u64 pi; - - pi = a.value * b.value; - acc = hma(TNUM(pi, 0), a.mask, b.mask | b.value); - return hma(acc, b.mask, a.value); + u64 acc_v = a.value * b.value; + struct tnum acc_m = TNUM(0, 0); + + while (a.value || a.mask) { + /* LSB of tnum a is a certain 1 */ + if (a.value & 1) + acc_m = tnum_add(acc_m, TNUM(0, b.mask)); + /* LSB of tnum a is uncertain */ + else if (a.mask & 1) + acc_m = tnum_add(acc_m, TNUM(0, b.value | b.mask)); + /* Note: no case for LSB is certain 0 */ + a = tnum_rshift(a, 1); + b = tnum_lshift(b, 1); + } + return tnum_add(TNUM(acc_v, 0), acc_m); } /* Note that if a and b disagree - i.e. one has a 'known 1' where the other has diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 2d44b5aa0057..28a3630c48ee 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -552,7 +552,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog) * __bpf_prog_enter returns: * 0 - skip execution of the bpf prog * 1 - execute bpf prog - * [2..MAX_U64] - excute bpf prog and record execution time. + * [2..MAX_U64] - execute bpf prog and record execution time. * This is start time. */ u64 notrace __bpf_prog_enter(struct bpf_prog *prog) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 94ba5163d4c5..be38bb930bf1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -47,7 +47,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. - * Since it's analyzing all pathes through the program, the length of the + * Since it's analyzing all paths through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less then 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k @@ -132,7 +132,7 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function - * returns ether pointer to map value or NULL. + * returns either pointer to map value or NULL. 
* * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to @@ -737,81 +737,104 @@ static void print_verifier_state(struct bpf_verifier_env *env, verbose(env, "\n"); } -#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ -static int copy_##NAME##_state(struct bpf_func_state *dst, \ - const struct bpf_func_state *src) \ -{ \ - if (!src->FIELD) \ - return 0; \ - if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ - /* internal bug, make state invalid to reject the program */ \ - memset(dst, 0, sizeof(*dst)); \ - return -EFAULT; \ - } \ - memcpy(dst->FIELD, src->FIELD, \ - sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ - return 0; \ -} -/* copy_reference_state() */ -COPY_STATE_FN(reference, acquired_refs, refs, 1) -/* copy_stack_state() */ -COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) -#undef COPY_STATE_FN - -#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ -static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ - bool copy_old) \ -{ \ - u32 old_size = state->COUNT; \ - struct bpf_##NAME##_state *new_##FIELD; \ - int slot = size / SIZE; \ - \ - if (size <= old_size || !size) { \ - if (copy_old) \ - return 0; \ - state->COUNT = slot * SIZE; \ - if (!size && old_size) { \ - kfree(state->FIELD); \ - state->FIELD = NULL; \ - } \ - return 0; \ - } \ - new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ - GFP_KERNEL); \ - if (!new_##FIELD) \ - return -ENOMEM; \ - if (copy_old) { \ - if (state->FIELD) \ - memcpy(new_##FIELD, state->FIELD, \ - sizeof(*new_##FIELD) * (old_size / SIZE)); \ - memset(new_##FIELD + old_size / SIZE, 0, \ - sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ - } \ - state->COUNT = slot * SIZE; \ - kfree(state->FIELD); \ - state->FIELD = new_##FIELD; \ - return 0; \ -} -/* realloc_reference_state() */ -REALLOC_STATE_FN(reference, acquired_refs, refs, 1) -/* realloc_stack_state() */ -REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) -#undef REALLOC_STATE_FN - -/* do_check() starts with zero-sized stack in struct bpf_verifier_state to - * make it consume minimal amount of memory. check_stack_write() access from - * the program calls into realloc_func_state() to grow the stack size. - * Note there is a non-zero 'parent' pointer inside bpf_verifier_state - * which realloc_stack_state() copies over. It points to previous - * bpf_verifier_state which is never reallocated. +/* copy array src of length n * size bytes to dst. dst is reallocated if it's too + * small to hold src. This is different from krealloc since we don't want to preserve + * the contents of dst. + * + * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could + * not be allocated. */ -static int realloc_func_state(struct bpf_func_state *state, int stack_size, - int refs_size, bool copy_old) +static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags) { - int err = realloc_reference_state(state, refs_size, copy_old); - if (err) - return err; - return realloc_stack_state(state, stack_size, copy_old); + size_t bytes; + + if (ZERO_OR_NULL_PTR(src)) + goto out; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + if (ksize(dst) < bytes) { + kfree(dst); + dst = kmalloc_track_caller(bytes, flags); + if (!dst) + return NULL; + } + + memcpy(dst, src, bytes); +out: + return dst ? dst : ZERO_SIZE_PTR; +} + +/* resize an array from old_n items to new_n items. 
the array is reallocated if it's too + * small to hold new_n items. new items are zeroed out if the array grows. + * + * Contrary to krealloc_array, does not free arr if new_n is zero. + */ +static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size) +{ + if (!new_n || old_n == new_n) + goto out; + + arr = krealloc_array(arr, new_n, size, GFP_KERNEL); + if (!arr) + return NULL; + + if (new_n > old_n) + memset(arr + old_n * size, 0, (new_n - old_n) * size); + +out: + return arr ? arr : ZERO_SIZE_PTR; +} + +static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src) +{ + dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, + sizeof(struct bpf_reference_state), GFP_KERNEL); + if (!dst->refs) + return -ENOMEM; + + dst->acquired_refs = src->acquired_refs; + return 0; +} + +static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) +{ + size_t n = src->allocated_stack / BPF_REG_SIZE; + + dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), + GFP_KERNEL); + if (!dst->stack) + return -ENOMEM; + + dst->allocated_stack = src->allocated_stack; + return 0; +} + +static int resize_reference_state(struct bpf_func_state *state, size_t n) +{ + state->refs = realloc_array(state->refs, state->acquired_refs, n, + sizeof(struct bpf_reference_state)); + if (!state->refs) + return -ENOMEM; + + state->acquired_refs = n; + return 0; +} + +static int grow_stack_state(struct bpf_func_state *state, int size) +{ + size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; + + if (old_n >= n) + return 0; + + state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); + if (!state->stack) + return -ENOMEM; + + state->allocated_stack = size; + return 0; } /* Acquire a pointer id from the env and update the state->refs to include @@ -825,7 +848,7 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) int new_ofs = state->acquired_refs; int id, err; - err = realloc_reference_state(state, state->acquired_refs + 1, true); + err = resize_reference_state(state, state->acquired_refs + 1); if (err) return err; id = ++env->id_gen; @@ -854,18 +877,6 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id) return -EINVAL; } -static int transfer_reference_state(struct bpf_func_state *dst, - struct bpf_func_state *src) -{ - int err = realloc_reference_state(dst, src->acquired_refs, false); - if (err) - return err; - err = copy_reference_state(dst, src); - if (err) - return err; - return 0; -} - static void free_func_state(struct bpf_func_state *state) { if (!state) @@ -904,10 +915,6 @@ static int copy_func_state(struct bpf_func_state *dst, { int err; - err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, - false); - if (err) - return err; memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); err = copy_reference_state(dst, src); if (err) @@ -919,16 +926,13 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, const struct bpf_verifier_state *src) { struct bpf_func_state *dst; - u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt; int i, err; - if (dst_state->jmp_history_cnt < src->jmp_history_cnt) { - kfree(dst_state->jmp_history); - dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER); - if (!dst_state->jmp_history) - return -ENOMEM; - } - memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz); + dst_state->jmp_history = 
copy_array(dst_state->jmp_history, src->jmp_history, + src->jmp_history_cnt, sizeof(struct bpf_idx_pair), + GFP_USER); + if (!dst_state->jmp_history) + return -ENOMEM; dst_state->jmp_history_cnt = src->jmp_history_cnt; /* if dst has more stack frames then src frame, free them */ @@ -2590,8 +2594,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; struct bpf_reg_state *reg = NULL; - err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), - state->acquired_refs, true); + err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, @@ -2613,7 +2616,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit * stack slot address like [fp - 8]. Other spill of - * scalar via different register has to be conervative. + * scalar via different register has to be conservative. * Backtrack from here and mark all registers as precise * that contributed into 'reg' being a constant. */ @@ -2753,8 +2756,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, if (value_reg && register_is_null(value_reg)) writing_zero = true; - err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE), - state->acquired_refs, true); + err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); if (err) return err; @@ -5629,7 +5631,7 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn subprog /* subprog number within this prog */); /* Transfer references to the callee */ - err = transfer_reference_state(callee, caller); + err = copy_reference_state(callee, caller); if (err) return err; @@ -5780,7 +5782,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) } /* Transfer references to the caller */ - err = transfer_reference_state(caller, callee); + err = copy_reference_state(caller, callee); if (err) return err; @@ -6483,6 +6485,27 @@ struct bpf_sanitize_info { bool mask_to_left; }; +static struct bpf_verifier_state * +sanitize_speculative_path(struct bpf_verifier_env *env, + const struct bpf_insn *insn, + u32 next_idx, u32 curr_idx) +{ + struct bpf_verifier_state *branch; + struct bpf_reg_state *regs; + + branch = push_stack(env, next_idx, curr_idx, true); + if (branch && insn) { + regs = branch->frame[branch->curframe]->regs; + if (BPF_SRC(insn->code) == BPF_K) { + mark_reg_unknown(env, regs, insn->dst_reg); + } else if (BPF_SRC(insn->code) == BPF_X) { + mark_reg_unknown(env, regs, insn->dst_reg); + mark_reg_unknown(env, regs, insn->src_reg); + } + } + return branch; +} + static int sanitize_ptr_alu(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, @@ -6566,12 +6589,26 @@ do_sim: tmp = *dst_reg; *dst_reg = *ptr_reg; } - ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); + ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, + env->insn_idx); if (!ptr_is_dst_reg && ret) *dst_reg = tmp; return !ret ? REASON_STACK : 0; } +static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) +{ + struct bpf_verifier_state *vstate = env->cur_state; + + /* If we simulate paths under speculation, we don't update the + * insn as 'seen' such that when we verify unreachable paths in + * the non-speculative domain, sanitize_dead_code() can still + * rewrite/sanitize them. 
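Earlier in this verifier.c hunk, copy_array() and realloc_array() replace the COPY_STATE_FN/REALLOC_STATE_FN macros with plain helpers that reuse the destination buffer whenever it is already large enough. A rough user-space analogue of that reuse-or-reallocate contract follows; the demo_* names are made up, glibc's malloc_usable_size() stands in for ksize(), and the kernel's ZERO_SIZE_PTR handling is omitted.

#include <malloc.h>     /* malloc_usable_size(), glibc-specific */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* reuse dst if it can already hold n * size bytes, else reallocate; the
 * old contents of dst are deliberately not preserved, as in the kernel helper */
static void *demo_copy_array(void *dst, const void *src, size_t n, size_t size)
{
        size_t bytes;

        if (!src || !n)
                return dst;
        if (__builtin_mul_overflow(n, size, &bytes))
                return NULL;
        if (malloc_usable_size(dst) < bytes) {
                free(dst);
                dst = malloc(bytes);
                if (!dst)
                        return NULL;
        }
        memcpy(dst, src, bytes);
        return dst;
}

int main(void)
{
        int src[4] = { 1, 2, 3, 4 };
        int *dst = demo_copy_array(NULL, src, 4, sizeof(int));

        if (dst)
                printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
        free(dst);
        return 0;
}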
+ */ + if (!vstate->speculative) + env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; +} + static int sanitize_err(struct bpf_verifier_env *env, const struct bpf_insn *insn, int reason, const struct bpf_reg_state *off_reg, @@ -8750,14 +8787,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, if (err) return err; } + if (pred == 1) { - /* only follow the goto, ignore fall-through */ + /* Only follow the goto, ignore fall-through. If needed, push + * the fall-through branch for simulation under speculative + * execution. + */ + if (!env->bypass_spec_v1 && + !sanitize_speculative_path(env, insn, *insn_idx + 1, + *insn_idx)) + return -EFAULT; *insn_idx += insn->off; return 0; } else if (pred == 0) { - /* only follow fall-through branch, since - * that's where the program will go + /* Only follow the fall-through branch, since that's where the + * program will go. If needed, push the goto branch for + * simulation under speculative execution. */ + if (!env->bypass_spec_v1 && + !sanitize_speculative_path(env, insn, + *insn_idx + insn->off + 1, + *insn_idx)) + return -EFAULT; return 0; } @@ -8919,12 +8970,14 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) mark_reg_known_zero(env, regs, insn->dst_reg); dst_reg->map_ptr = map; - if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { + if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || + insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { dst_reg->type = PTR_TO_MAP_VALUE; dst_reg->off = aux->map_off; if (map_value_has_spin_lock(map)) dst_reg->id = ++env->id_gen; - } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { + } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || + insn->src_reg == BPF_PSEUDO_MAP_IDX) { dst_reg->type = CONST_PTR_TO_MAP; } else { verbose(env, "bpf verifier is misconfigured\n"); @@ -9055,7 +9108,7 @@ static int check_return_code(struct bpf_verifier_env *env) !prog->aux->attach_func_proto->type) return 0; - /* eBPF calling convetion is such that R0 is used + /* eBPF calling convention is such that R0 is used * to return the value from eBPF program. * Make sure that it's readable at this time * of bpf_exit, which means that program wrote @@ -9440,7 +9493,7 @@ static int check_abnormal_return(struct bpf_verifier_env *env) static int check_btf_func(struct bpf_verifier_env *env, const union bpf_attr *attr, - union bpf_attr __user *uattr) + bpfptr_t uattr) { const struct btf_type *type, *func_proto, *ret_type; u32 i, nfuncs, urec_size, min_size; @@ -9449,7 +9502,7 @@ static int check_btf_func(struct bpf_verifier_env *env, struct bpf_func_info_aux *info_aux = NULL; struct bpf_prog *prog; const struct btf *btf; - void __user *urecord; + bpfptr_t urecord; u32 prev_offset = 0; bool scalar_return; int ret = -ENOMEM; @@ -9477,7 +9530,7 @@ static int check_btf_func(struct bpf_verifier_env *env, prog = env->prog; btf = prog->aux->btf; - urecord = u64_to_user_ptr(attr->func_info); + urecord = make_bpfptr(attr->func_info, uattr.is_kernel); min_size = min_t(u32, krec_size, urec_size); krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); @@ -9495,13 +9548,15 @@ static int check_btf_func(struct bpf_verifier_env *env, /* set the size kernel expects so loader can zero * out the rest of the record. 
*/ - if (put_user(min_size, &uattr->func_info_rec_size)) + if (copy_to_bpfptr_offset(uattr, + offsetof(union bpf_attr, func_info_rec_size), + &min_size, sizeof(min_size))) ret = -EFAULT; } goto err_free; } - if (copy_from_user(&krecord[i], urecord, min_size)) { + if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { ret = -EFAULT; goto err_free; } @@ -9553,7 +9608,7 @@ static int check_btf_func(struct bpf_verifier_env *env, } prev_offset = krecord[i].insn_off; - urecord += urec_size; + bpfptr_add(&urecord, urec_size); } prog->aux->func_info = krecord; @@ -9585,14 +9640,14 @@ static void adjust_btf_func(struct bpf_verifier_env *env) static int check_btf_line(struct bpf_verifier_env *env, const union bpf_attr *attr, - union bpf_attr __user *uattr) + bpfptr_t uattr) { u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; struct bpf_subprog_info *sub; struct bpf_line_info *linfo; struct bpf_prog *prog; const struct btf *btf; - void __user *ulinfo; + bpfptr_t ulinfo; int err; nr_linfo = attr->line_info_cnt; @@ -9618,7 +9673,7 @@ static int check_btf_line(struct bpf_verifier_env *env, s = 0; sub = env->subprog_info; - ulinfo = u64_to_user_ptr(attr->line_info); + ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); expected_size = sizeof(struct bpf_line_info); ncopy = min_t(u32, expected_size, rec_size); for (i = 0; i < nr_linfo; i++) { @@ -9626,14 +9681,15 @@ static int check_btf_line(struct bpf_verifier_env *env, if (err) { if (err == -E2BIG) { verbose(env, "nonzero tailing record in line_info"); - if (put_user(expected_size, - &uattr->line_info_rec_size)) + if (copy_to_bpfptr_offset(uattr, + offsetof(union bpf_attr, line_info_rec_size), + &expected_size, sizeof(expected_size))) err = -EFAULT; } goto err_free; } - if (copy_from_user(&linfo[i], ulinfo, ncopy)) { + if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { err = -EFAULT; goto err_free; } @@ -9685,7 +9741,7 @@ static int check_btf_line(struct bpf_verifier_env *env, } prev_offset = linfo[i].insn_off; - ulinfo += rec_size; + bpfptr_add(&ulinfo, rec_size); } if (s != env->subprog_cnt) { @@ -9707,7 +9763,7 @@ err_free: static int check_btf_info(struct bpf_verifier_env *env, const union bpf_attr *attr, - union bpf_attr __user *uattr) + bpfptr_t uattr) { struct btf *btf; int err; @@ -9752,13 +9808,6 @@ static bool range_within(struct bpf_reg_state *old, old->s32_max_value >= cur->s32_max_value; } -/* Maximum number of register states that can exist at once */ -#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) -struct idpair { - u32 old; - u32 cur; -}; - /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. @@ -9769,11 +9818,11 @@ struct idpair { * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. 
*/ -static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) +static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) { unsigned int i; - for (i = 0; i < ID_MAP_SIZE; i++) { + for (i = 0; i < BPF_ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; @@ -9850,7 +9899,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env, * Since the verifier pushes the branch states as it sees them while exploring * the program the condition of walking the branch instruction for the second * time means that all states below this branch were already explored and - * their final liveness markes are already propagated. + * their final liveness marks are already propagated. * Hence when the verifier completes the search of state list in is_state_visited() * we can call this clean_live_states() function to mark all liveness states * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' @@ -9886,7 +9935,7 @@ next: /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, - struct idpair *idmap) + struct bpf_id_pair *idmap) { bool equal; @@ -10004,7 +10053,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, static bool stacksafe(struct bpf_func_state *old, struct bpf_func_state *cur, - struct idpair *idmap) + struct bpf_id_pair *idmap) { int i, spi; @@ -10101,32 +10150,23 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ -static bool func_states_equal(struct bpf_func_state *old, +static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur) { - struct idpair *idmap; - bool ret = false; int i; - idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); - /* If we failed to allocate the idmap, just say it's not safe */ - if (!idmap) - return false; - - for (i = 0; i < MAX_BPF_REG; i++) { - if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) - goto out_free; - } + memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); + for (i = 0; i < MAX_BPF_REG; i++) + if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch)) + return false; - if (!stacksafe(old, cur, idmap)) - goto out_free; + if (!stacksafe(old, cur, env->idmap_scratch)) + return false; if (!refsafe(old, cur)) - goto out_free; - ret = true; -out_free: - kfree(idmap); - return ret; + return false; + + return true; } static bool states_equal(struct bpf_verifier_env *env, @@ -10153,7 +10193,7 @@ static bool states_equal(struct bpf_verifier_env *env, for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; - if (!func_states_equal(old->frame[i], cur->frame[i])) + if (!func_states_equal(env, old->frame[i], cur->frame[i])) return false; } return true; @@ -10630,7 +10670,7 @@ static int do_check(struct bpf_verifier_env *env) } regs = cur_regs(env); - env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; + sanitize_mark_insn_seen(env); prev_insn_idx = env->insn_idx; if (class == BPF_ALU || class == BPF_ALU64) { @@ -10857,7 +10897,7 @@ process_bpf_exit: return err; env->insn_idx++; - env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; + sanitize_mark_insn_seen(env); } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; @@ -11190,6 +11230,7 @@ static 
int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) struct bpf_map *map; struct fd f; u64 addr; + u32 fd; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || @@ -11219,16 +11260,38 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) /* In final convert_pseudo_ld_imm64() step, this is * converted into regular 64-bit imm load insn. */ - if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && - insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || - (insn[0].src_reg == BPF_PSEUDO_MAP_FD && - insn[1].imm != 0)) { - verbose(env, - "unrecognized bpf_ld_imm64 insn\n"); + switch (insn[0].src_reg) { + case BPF_PSEUDO_MAP_VALUE: + case BPF_PSEUDO_MAP_IDX_VALUE: + break; + case BPF_PSEUDO_MAP_FD: + case BPF_PSEUDO_MAP_IDX: + if (insn[1].imm == 0) + break; + fallthrough; + default: + verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } - f = fdget(insn[0].imm); + switch (insn[0].src_reg) { + case BPF_PSEUDO_MAP_IDX_VALUE: + case BPF_PSEUDO_MAP_IDX: + if (bpfptr_is_null(env->fd_array)) { + verbose(env, "fd_idx without fd_array is invalid\n"); + return -EPROTO; + } + if (copy_from_bpfptr_offset(&fd, env->fd_array, + insn[0].imm * sizeof(fd), + sizeof(fd))) + return -EFAULT; + break; + default: + fd = insn[0].imm; + break; + } + + f = fdget(fd); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", @@ -11243,7 +11306,8 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) } aux = &env->insn_aux_data[i]; - if (insn->src_reg == BPF_PSEUDO_MAP_FD) { + if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || + insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { addr = (unsigned long)map; } else { u32 off = insn[1].imm; @@ -11366,6 +11430,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; struct bpf_insn *insn = new_prog->insnsi; + u32 old_seen = old_data[off].seen; u32 prog_len; int i; @@ -11386,7 +11451,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) { - new_data[i].seen = env->pass_cnt; + /* Expand insni[off]'s seen count to the patched range. 
*/ + new_data[i].seen = old_seen; new_data[i].zext_dst = insn_has_def32(env, insn + i); } env->insn_aux_data = new_data; @@ -11408,7 +11474,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len } } -static void adjust_poke_descs(struct bpf_prog *prog, u32 len) +static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) { struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; int i, sz = prog->aux->size_poke_tab; @@ -11416,6 +11482,8 @@ static void adjust_poke_descs(struct bpf_prog *prog, u32 len) for (i = 0; i < sz; i++) { desc = &tab[i]; + if (desc->insn_idx <= off) + continue; desc->insn_idx += len - 1; } } @@ -11436,7 +11504,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of if (adjust_insn_aux_data(env, new_prog, off, len)) return NULL; adjust_subprog_starts(env, off, len); - adjust_poke_descs(new_prog, len); + adjust_poke_descs(new_prog, off, len); return new_prog; } @@ -12455,7 +12523,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) prog->aux->max_pkt_offset = MAX_PACKET_OFF; /* mark bpf_tail_call as different opcode to avoid - * conditional branch in the interpeter for every normal + * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ @@ -12710,6 +12778,9 @@ static void free_states(struct bpf_verifier_env *env) * insn_aux_data was touched. These variables are compared to clear temporary * data from failed pass. For testing and experiments do_check_common() can be * run multiple times even when prior attempt to verify is unsuccessful. + * + * Note that special handling is needed on !env->bypass_spec_v1 if this is + * ever called outside of error path with subsequent program rejection. 
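Among the changes above, adjust_poke_descs() now takes the patch offset so that only poke descriptors located after the patched instruction are shifted. A minimal standalone model of that index fix-up, with invented demo names and values:

#include <stdio.h>

/* after one insn at 'off' is patched into 'len' insns, only descriptors
 * past the patch site move; they shift by len - 1 */
static void demo_adjust_poke_descs(unsigned int *insn_idx, int n,
                                   unsigned int off, unsigned int len)
{
        for (int i = 0; i < n; i++) {
                if (insn_idx[i] <= off)
                        continue;
                insn_idx[i] += len - 1;
        }
}

int main(void)
{
        unsigned int idx[] = { 3, 10, 25 };

        demo_adjust_poke_descs(idx, 3, 10, 4);  /* insn 10 grows to 4 insns */
        printf("%u %u %u\n", idx[0], idx[1], idx[2]);   /* prints: 3 10 28 */
        return 0;
}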
*/ static void sanitize_insn_aux_data(struct bpf_verifier_env *env) { @@ -13227,6 +13298,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) int ret; u64 key; + if (prog->type == BPF_PROG_TYPE_SYSCALL) { + if (prog->aux->sleepable) + /* attach_btf_id checked to be zero already */ + return 0; + verbose(env, "Syscall programs can only be sleepable\n"); + return -EINVAL; + } + if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && prog->type != BPF_PROG_TYPE_LSM) { verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n"); @@ -13301,8 +13380,7 @@ struct btf *bpf_get_btf_vmlinux(void) return btf_vmlinux; } -int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, - union bpf_attr __user *uattr) +int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) { u64 start_time = ktime_get_ns(); struct bpf_verifier_env *env; @@ -13332,6 +13410,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->insn_aux_data[i].orig_idx = i; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; + env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); is_priv = bpf_capable(); bpf_get_btf_vmlinux(); diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 8190b6bfc978..ee93b6e89587 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -713,7 +713,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) css_task_iter_start(&cgrp->self, 0, &it); while ((tsk = css_task_iter_next(&it))) { - switch (tsk->state) { + switch (READ_ONCE(tsk->__state)) { case TASK_RUNNING: stats->nr_running++; break; @@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent struct cgroup *cgrp = kn->priv; int ret; + /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */ + if (strchr(new_name_str, '\n')) + return -EINVAL; + if (kernfs_type(kn) != KERNFS_DIR) return -ENOTDIR; if (kn->parent != new_parent) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 21ecc6ee6a6d..3a0161c21b6b 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -209,6 +209,22 @@ struct cgroup_namespace init_cgroup_ns = { static struct file_system_type cgroup2_fs_type; static struct cftype cgroup_base_files[]; +/* cgroup optional features */ +enum cgroup_opt_features { +#ifdef CONFIG_PSI + OPT_FEATURE_PRESSURE, +#endif + OPT_FEATURE_COUNT +}; + +static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = { +#ifdef CONFIG_PSI + "pressure", +#endif +}; + +static u16 cgroup_feature_disable_mask __read_mostly; + static int cgroup_apply_control(struct cgroup *cgrp); static void cgroup_finalize_control(struct cgroup *cgrp, int ret); static void css_task_iter_skip(struct css_task_iter *it, @@ -577,6 +593,7 @@ out_unlock: rcu_read_unlock(); return css; } +EXPORT_SYMBOL_GPL(cgroup_get_e_css); static void cgroup_get_live(struct cgroup *cgrp) { @@ -2389,7 +2406,7 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, } /** - * cgroup_taskset_migrate - migrate a taskset + * cgroup_migrate_execute - migrate a taskset * @mgctx: migration context * * Migrate tasks in @mgctx as setup by migration preparation functions. 
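The cgroup_opt_features enum, cgroup_opt_feature_names[] array and cgroup_feature_disable_mask introduced at the top of this cgroup.c diff drive a name-to-bit lookup that the cgroup_disable= boot-parameter handling later in the hunk relies on. A compact user-space sketch of that pattern, with hypothetical demo_* names:

#include <stdio.h>
#include <string.h>

enum demo_opt_features { DEMO_FEATURE_PRESSURE, DEMO_FEATURE_COUNT };

static const char *demo_feature_names[DEMO_FEATURE_COUNT] = {
        "pressure",
};

static unsigned int demo_feature_disable_mask;

/* mirrors the loop added to cgroup_disable(): match the token, set the bit */
static void demo_disable(const char *token)
{
        for (int i = 0; i < DEMO_FEATURE_COUNT; i++) {
                if (strcmp(token, demo_feature_names[i]))
                        continue;
                demo_feature_disable_mask |= 1U << i;
                printf("Disabling %s feature\n", demo_feature_names[i]);
                break;
        }
}

static int demo_psi_enabled(void)
{
        return !(demo_feature_disable_mask & (1U << DEMO_FEATURE_PRESSURE));
}

int main(void)
{
        demo_disable("pressure");
        printf("psi enabled: %d\n", demo_psi_enabled());        /* 0 */
        return 0;
}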
@@ -3631,6 +3648,18 @@ static void cgroup_pressure_release(struct kernfs_open_file *of) { psi_trigger_replace(&of->priv, NULL); } + +bool cgroup_psi_enabled(void) +{ + return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0; +} + +#else /* CONFIG_PSI */ +bool cgroup_psi_enabled(void) +{ + return false; +} + #endif /* CONFIG_PSI */ static int cgroup_freeze_show(struct seq_file *seq, void *v) @@ -3667,6 +3696,80 @@ static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, return nbytes; } +static void __cgroup_kill(struct cgroup *cgrp) +{ + struct css_task_iter it; + struct task_struct *task; + + lockdep_assert_held(&cgroup_mutex); + + spin_lock_irq(&css_set_lock); + set_bit(CGRP_KILL, &cgrp->flags); + spin_unlock_irq(&css_set_lock); + + css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it); + while ((task = css_task_iter_next(&it))) { + /* Ignore kernel threads here. */ + if (task->flags & PF_KTHREAD) + continue; + + /* Skip tasks that are already dying. */ + if (__fatal_signal_pending(task)) + continue; + + send_sig(SIGKILL, task, 0); + } + css_task_iter_end(&it); + + spin_lock_irq(&css_set_lock); + clear_bit(CGRP_KILL, &cgrp->flags); + spin_unlock_irq(&css_set_lock); +} + +static void cgroup_kill(struct cgroup *cgrp) +{ + struct cgroup_subsys_state *css; + struct cgroup *dsct; + + lockdep_assert_held(&cgroup_mutex); + + cgroup_for_each_live_descendant_pre(dsct, css, cgrp) + __cgroup_kill(dsct); +} + +static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + ssize_t ret = 0; + int kill; + struct cgroup *cgrp; + + ret = kstrtoint(strstrip(buf), 0, &kill); + if (ret) + return ret; + + if (kill != 1) + return -ERANGE; + + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENOENT; + + /* + * Killing is a process directed operation, i.e. the whole thread-group + * is taken down so act like we do for cgroup.procs and only make this + * writable in non-threaded cgroups. + */ + if (cgroup_is_threaded(cgrp)) + ret = -EOPNOTSUPP; + else + cgroup_kill(cgrp); + + cgroup_kn_unlock(of->kn); + + return ret ?: nbytes; +} + static int cgroup_file_open(struct kernfs_open_file *of) { struct cftype *cft = of_cft(of); @@ -3881,6 +3984,8 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css, restart: for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) { /* does cft->flags tell us to skip this file on @cgrp? 
*/ + if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) + continue; if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) continue; if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) @@ -3958,6 +4063,9 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) WARN_ON(cft->ss || cft->kf_ops); + if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) + continue; + if (cft->seq_start) kf_ops = &cgroup_kf_ops; else @@ -4860,12 +4968,18 @@ static struct cftype cgroup_base_files[] = { .write = cgroup_freeze_write, }, { + .name = "cgroup.kill", + .flags = CFTYPE_NOT_ON_ROOT, + .write = cgroup_kill_write, + }, + { .name = "cpu.stat", .seq_show = cpu_stat_show, }, #ifdef CONFIG_PSI { .name = "io.pressure", + .flags = CFTYPE_PRESSURE, .seq_show = cgroup_io_pressure_show, .write = cgroup_io_pressure_write, .poll = cgroup_pressure_poll, @@ -4873,6 +4987,7 @@ static struct cftype cgroup_base_files[] = { }, { .name = "memory.pressure", + .flags = CFTYPE_PRESSURE, .seq_show = cgroup_memory_pressure_show, .write = cgroup_memory_pressure_write, .poll = cgroup_pressure_poll, @@ -4880,6 +4995,7 @@ static struct cftype cgroup_base_files[] = { }, { .name = "cpu.pressure", + .flags = CFTYPE_PRESSURE, .seq_show = cgroup_cpu_pressure_show, .write = cgroup_cpu_pressure_write, .poll = cgroup_pressure_poll, @@ -5771,6 +5887,31 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) } /* + * cgroup_get_from_id : get the cgroup associated with cgroup id + * @id: cgroup id + * On success return the cgrp, on failure return NULL + */ +struct cgroup *cgroup_get_from_id(u64 id) +{ + struct kernfs_node *kn; + struct cgroup *cgrp = NULL; + + mutex_lock(&cgroup_mutex); + kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id); + if (!kn) + goto out_unlock; + + cgrp = kn->priv; + if (cgroup_is_dead(cgrp) || !cgroup_tryget(cgrp)) + cgrp = NULL; + kernfs_put(kn); +out_unlock: + mutex_unlock(&cgroup_mutex); + return cgrp; +} +EXPORT_SYMBOL_GPL(cgroup_get_from_id); + +/* * proc_cgroup_show() * - Print task's cgroup paths into seq_file, one line for each hierarchy * - Used for /proc/<pid>/cgroup. @@ -6079,6 +6220,8 @@ void cgroup_post_fork(struct task_struct *child, struct kernel_clone_args *kargs) __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) { + unsigned long cgrp_flags = 0; + bool kill = false; struct cgroup_subsys *ss; struct css_set *cset; int i; @@ -6090,6 +6233,11 @@ void cgroup_post_fork(struct task_struct *child, /* init tasks are special, only link regular threads */ if (likely(child->pid)) { + if (kargs->cgrp) + cgrp_flags = kargs->cgrp->flags; + else + cgrp_flags = cset->dfl_cgrp->flags; + WARN_ON_ONCE(!list_empty(&child->cg_list)); cset->nr_tasks++; css_set_move_task(child, NULL, cset, false); @@ -6098,23 +6246,32 @@ void cgroup_post_fork(struct task_struct *child, cset = NULL; } - /* - * If the cgroup has to be frozen, the new task has too. Let's set - * the JOBCTL_TRAP_FREEZE jobctl bit to get the task into the - * frozen state. - */ - if (unlikely(cgroup_task_freeze(child))) { - spin_lock(&child->sighand->siglock); - WARN_ON_ONCE(child->frozen); - child->jobctl |= JOBCTL_TRAP_FREEZE; - spin_unlock(&child->sighand->siglock); + if (!(child->flags & PF_KTHREAD)) { + if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) { + /* + * If the cgroup has to be frozen, the new task has + * too. Let's set the JOBCTL_TRAP_FREEZE jobctl bit to + * get the task into the frozen state. 
+ */ + spin_lock(&child->sighand->siglock); + WARN_ON_ONCE(child->frozen); + child->jobctl |= JOBCTL_TRAP_FREEZE; + spin_unlock(&child->sighand->siglock); + + /* + * Calling cgroup_update_frozen() isn't required here, + * because it will be called anyway a bit later from + * do_freezer_trap(). So we avoid cgroup's transient + * switch from the frozen state and back. + */ + } /* - * Calling cgroup_update_frozen() isn't required here, - * because it will be called anyway a bit later from - * do_freezer_trap(). So we avoid cgroup's transient switch - * from the frozen state and back. + * If the cgroup is to be killed notice it now and take the + * child down right after we finished preparing it for + * userspace. */ + kill = test_bit(CGRP_KILL, &cgrp_flags); } spin_unlock_irq(&css_set_lock); @@ -6137,6 +6294,10 @@ void cgroup_post_fork(struct task_struct *child, put_css_set(rcset); } + /* Cgroup has to be killed so take down child immediately. */ + if (unlikely(kill)) + do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID); + cgroup_css_set_put_fork(kargs); } @@ -6162,7 +6323,8 @@ void cgroup_exit(struct task_struct *tsk) cset->nr_tasks--; WARN_ON_ONCE(cgroup_task_frozen(tsk)); - if (unlikely(cgroup_task_freeze(tsk))) + if (unlikely(!(tsk->flags & PF_KTHREAD) && + test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags))) cgroup_update_frozen(task_dfl_cgroup(tsk)); spin_unlock_irq(&css_set_lock); @@ -6213,6 +6375,15 @@ static int __init cgroup_disable(char *str) pr_info("Disabling %s control group subsystem\n", ss->name); } + + for (i = 0; i < OPT_FEATURE_COUNT; i++) { + if (strcmp(token, cgroup_opt_feature_names[i])) + continue; + cgroup_feature_disable_mask |= 1 << i; + pr_info("Disabling %s control group feature\n", + cgroup_opt_feature_names[i]); + break; + } } return 1; } @@ -6511,6 +6682,9 @@ static ssize_t show_delegatable_files(struct cftype *files, char *buf, if (!(cft->flags & CFTYPE_NS_DELEGATABLE)) continue; + if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) + continue; + if (prefix) ret += snprintf(buf + ret, size - ret, "%s.", prefix); diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index cee265cb535c..7f0e58917432 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -220,7 +220,7 @@ void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp) } /** - * cgroup_rstat_flush_begin - flush stats in @cgrp's subtree and hold + * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold * @cgrp: target cgroup * * Flush stats in @cgrp's subtree and prevent further flushes. Must be diff --git a/kernel/cpu.c b/kernel/cpu.c index e538518556f4..804b847912dc 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -32,6 +32,7 @@ #include <linux/relay.h> #include <linux/slab.h> #include <linux/percpu-rwsem.h> +#include <linux/cpuset.h> #include <trace/events/power.h> #define CREATE_TRACE_POINTS @@ -873,6 +874,52 @@ void __init cpuhp_threads_init(void) kthread_unpark(this_cpu_read(cpuhp_state.thread)); } +/* + * + * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock + * protected region. + * + * The operation is still serialized against concurrent CPU hotplug via + * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_ + * serialized against other hotplug related activity like adding or + * removing of state callbacks and state instances, which invoke either the + * startup or the teardown callback of the affected state. + * + * This is required for subsystems which are unfixable vs. 
CPU hotplug and + * evade lock inversion problems by scheduling work which has to be + * completed _before_ cpu_up()/_cpu_down() returns. + * + * Don't even think about adding anything to this for any new code or even + * drivers. It's only purpose is to keep existing lock order trainwrecks + * working. + * + * For cpu_down() there might be valid reasons to finish cleanups which are + * not required to be done under cpu_hotplug_lock, but that's a different + * story and would be not invoked via this. + */ +static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen) +{ + /* + * cpusets delegate hotplug operations to a worker to "solve" the + * lock order problems. Wait for the worker, but only if tasks are + * _not_ frozen (suspend, hibernate) as that would wait forever. + * + * The wait is required because otherwise the hotplug operation + * returns with inconsistent state, which could even be observed in + * user space when a new CPU is brought up. The CPU plug uevent + * would be delivered and user space reacting on it would fail to + * move tasks to the newly plugged CPU up to the point where the + * work has finished because up to that point the newly plugged CPU + * is not assignable in cpusets/cgroups. On unplug that's not + * necessarily a visible issue, but it is still inconsistent state, + * which is the real problem which needs to be "fixed". This can't + * prevent the transient state between scheduling the work and + * returning from waiting for it. + */ + if (!tasks_frozen) + cpuset_wait_for_hotplug(); +} + #ifdef CONFIG_HOTPLUG_CPU #ifndef arch_clear_mm_cpumask_cpu #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) @@ -961,7 +1008,7 @@ static int takedown_cpu(unsigned int cpu) int err; /* Park the smpboot threads */ - kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); + kthread_park(st->thread); /* * Prevent irq alloc/free while the dying cpu reorganizes the @@ -977,7 +1024,7 @@ static int takedown_cpu(unsigned int cpu) /* CPU refused to die */ irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */ - kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread); + kthread_unpark(st->thread); return err; } BUG_ON(cpu_online(cpu)); @@ -1108,6 +1155,7 @@ out: */ lockup_detector_cleanup(); arch_smt_update(); + cpu_up_down_serialize_trainwrecks(tasks_frozen); return ret; } @@ -1302,6 +1350,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) out: cpus_write_unlock(); arch_smt_update(); + cpu_up_down_serialize_trainwrecks(tasks_frozen); return ret; } diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 825284baaf46..da449c1cdca7 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -455,7 +455,7 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_SYMBOL(_stext); VMCOREINFO_SYMBOL(vmap_area_list); -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA VMCOREINFO_SYMBOL(mem_map); VMCOREINFO_SYMBOL(contig_page_data); #endif @@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); VMCOREINFO_STRUCT_SIZE(mem_section); VMCOREINFO_OFFSET(mem_section, section_mem_map); + VMCOREINFO_NUMBER(SECTION_SIZE_BITS); VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS); #endif VMCOREINFO_STRUCT_SIZE(page); @@ -483,7 +484,7 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_OFFSET(page, compound_head); VMCOREINFO_OFFSET(pglist_data, node_zones); VMCOREINFO_OFFSET(pglist_data, nr_zones); -#ifdef CONFIG_FLAT_NODE_MEM_MAP 
+#ifdef CONFIG_FLATMEM VMCOREINFO_OFFSET(pglist_data, node_mem_map); #endif VMCOREINFO_OFFSET(pglist_data, node_start_pfn); diff --git a/kernel/cred.c b/kernel/cred.c index e1d274cd741b..e6fd2b3fc31f 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -60,6 +60,7 @@ struct cred init_cred = { .user = INIT_USER, .user_ns = &init_user_ns, .group_info = &init_groups, + .ucounts = &init_ucounts, }; static inline void set_cred_subscribers(struct cred *cred, int n) @@ -119,6 +120,8 @@ static void put_cred_rcu(struct rcu_head *rcu) if (cred->group_info) put_group_info(cred->group_info); free_uid(cred->user); + if (cred->ucounts) + put_ucounts(cred->ucounts); put_user_ns(cred->user_ns); kmem_cache_free(cred_jar, cred); } @@ -222,6 +225,7 @@ struct cred *cred_alloc_blank(void) #ifdef CONFIG_DEBUG_CREDENTIALS new->magic = CRED_MAGIC; #endif + new->ucounts = get_ucounts(&init_ucounts); if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0) goto error; @@ -284,6 +288,11 @@ struct cred *prepare_creds(void) if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) goto error; + + new->ucounts = get_ucounts(new->ucounts); + if (!new->ucounts) + goto error; + validate_creds(new); return new; @@ -351,7 +360,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) kdebug("share_creds(%p{%d,%d})", p->cred, atomic_read(&p->cred->usage), read_cred_subscribers(p->cred)); - atomic_inc(&p->cred->user->processes); + inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); return 0; } @@ -363,6 +372,9 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) ret = create_user_ns(new); if (ret < 0) goto error_put; + ret = set_cred_ucounts(new); + if (ret < 0) + goto error_put; } #ifdef CONFIG_KEYS @@ -384,8 +396,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) } #endif - atomic_inc(&new->user->processes); p->cred = p->real_cred = get_cred(new); + inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); alter_cred_subscribers(new, 2); validate_creds(new); return 0; @@ -485,12 +497,12 @@ int commit_creds(struct cred *new) * in set_user(). */ alter_cred_subscribers(new, 2); - if (new->user != old->user) - atomic_inc(&new->user->processes); + if (new->user != old->user || new->user_ns != old->user_ns) + inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1); rcu_assign_pointer(task->real_cred, new); rcu_assign_pointer(task->cred, new); if (new->user != old->user) - atomic_dec(&old->user->processes); + dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1); alter_cred_subscribers(old, -2); /* send notifications */ @@ -653,6 +665,31 @@ int cred_fscmp(const struct cred *a, const struct cred *b) } EXPORT_SYMBOL(cred_fscmp); +int set_cred_ucounts(struct cred *new) +{ + struct task_struct *task = current; + const struct cred *old = task->real_cred; + struct ucounts *old_ucounts = new->ucounts; + + if (new->user == old->user && new->user_ns == old->user_ns) + return 0; + + /* + * This optimization is needed because alloc_ucounts() uses locks + * for table lookups. 
+ */ + if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid)) + return 0; + + if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid))) + return -EAGAIN; + + if (old_ucounts) + put_ucounts(old_ucounts); + + return 0; +} + /* * initialise the credentials stuff */ @@ -719,6 +756,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) goto error; + new->ucounts = get_ucounts(new->ucounts); + if (!new->ucounts) + goto error; + put_cred(old); validate_creds(new); return new; diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index cbc10f0a37a7..d8ee5647b732 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2488,7 +2488,6 @@ static void kdb_sysinfo(struct sysinfo *val) static int kdb_summary(int argc, const char **argv) { time64_t now; - struct tm tm; struct sysinfo val; if (argc) @@ -2502,13 +2501,7 @@ static int kdb_summary(int argc, const char **argv) kdb_printf("domainname %s\n", init_uts_ns.name.domainname); now = __ktime_get_real_seconds(); - time64_to_tm(now, 0, &tm); - kdb_printf("date %04ld-%02d-%02d %02d:%02d:%02d " - "tz_minuteswest %d\n", - 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec, - sys_tz.tz_minuteswest); - + kdb_printf("date %ptTs tz_minuteswest %d\n", &now, sys_tz.tz_minuteswest); kdb_sysinfo(&val); kdb_printf("uptime "); if (val.uptime > (24*60*60)) { diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 91bb666d7c03..9f50d22d68e6 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -609,23 +609,25 @@ unsigned long kdb_task_state_string(const char *s) */ char kdb_task_state_char (const struct task_struct *p) { - int cpu; - char state; + unsigned int p_state; unsigned long tmp; + char state; + int cpu; if (!p || copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long))) return 'E'; cpu = kdb_process_cpu(p); - state = (p->state == 0) ? 'R' : - (p->state < 0) ? 'U' : - (p->state & TASK_UNINTERRUPTIBLE) ? 'D' : - (p->state & TASK_STOPPED) ? 'T' : - (p->state & TASK_TRACED) ? 'C' : + p_state = READ_ONCE(p->__state); + state = (p_state == 0) ? 'R' : + (p_state < 0) ? 'U' : + (p_state & TASK_UNINTERRUPTIBLE) ? 'D' : + (p_state & TASK_STOPPED) ? 'T' : + (p_state & TASK_TRACED) ? 'C' : (p->exit_state & EXIT_ZOMBIE) ? 'Z' : (p->exit_state & EXIT_DEAD) ? 'E' : - (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?'; + (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?'; if (is_idle_task(p)) { /* Idle task. Is it really idle, apart from the kdb * interrupt? 
*/ diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 27725754ac99..51530d5b15a8 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -7,30 +7,64 @@ #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/cputime.h> +#include <linux/sched/clock.h> #include <linux/slab.h> #include <linux/taskstats.h> -#include <linux/time.h> #include <linux/sysctl.h> #include <linux/delayacct.h> #include <linux/module.h> -int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */ -EXPORT_SYMBOL_GPL(delayacct_on); +DEFINE_STATIC_KEY_FALSE(delayacct_key); +int delayacct_on __read_mostly; /* Delay accounting turned on/off */ struct kmem_cache *delayacct_cache; -static int __init delayacct_setup_disable(char *str) +static void set_delayacct(bool enabled) { - delayacct_on = 0; + if (enabled) { + static_branch_enable(&delayacct_key); + delayacct_on = 1; + } else { + delayacct_on = 0; + static_branch_disable(&delayacct_key); + } +} + +static int __init delayacct_setup_enable(char *str) +{ + delayacct_on = 1; return 1; } -__setup("nodelayacct", delayacct_setup_disable); +__setup("delayacct", delayacct_setup_enable); void delayacct_init(void) { delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT); delayacct_tsk_init(&init_task); + set_delayacct(delayacct_on); } +#ifdef CONFIG_PROC_SYSCTL +int sysctl_delayacct(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) +{ + int state = delayacct_on; + struct ctl_table t; + int err; + + if (write && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + t = *table; + t.data = &state; + err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); + if (err < 0) + return err; + if (write) + set_delayacct(state); + return err; +} +#endif + void __delayacct_tsk_init(struct task_struct *tsk) { tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); @@ -42,10 +76,9 @@ void __delayacct_tsk_init(struct task_struct *tsk) * Finish delay accounting for a statistic using its timestamps (@start), * accumalator (@total) and @count */ -static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, - u32 *count) +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count) { - s64 ns = ktime_get_ns() - *start; + s64 ns = local_clock() - *start; unsigned long flags; if (ns > 0) { @@ -58,7 +91,7 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, void __delayacct_blkio_start(void) { - current->delays->blkio_start = ktime_get_ns(); + current->delays->blkio_start = local_clock(); } /* @@ -82,7 +115,7 @@ void __delayacct_blkio_end(struct task_struct *p) delayacct_end(&delays->lock, &delays->blkio_start, total, count); } -int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) +int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { u64 utime, stime, stimescaled, utimescaled; unsigned long long t2, t3; @@ -117,6 +150,9 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->cpu_run_virtual_total = (tmp < (s64)d->cpu_run_virtual_total) ? 
0 : tmp; + if (!tsk->delays) + return 0; + /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ raw_spin_lock_irqsave(&tsk->delays->lock, flags); @@ -151,21 +187,20 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk) void __delayacct_freepages_start(void) { - current->delays->freepages_start = ktime_get_ns(); + current->delays->freepages_start = local_clock(); } void __delayacct_freepages_end(void) { - delayacct_end( - ¤t->delays->lock, - ¤t->delays->freepages_start, - ¤t->delays->freepages_delay, - ¤t->delays->freepages_count); + delayacct_end(¤t->delays->lock, + ¤t->delays->freepages_start, + ¤t->delays->freepages_delay, + ¤t->delays->freepages_count); } void __delayacct_thrashing_start(void) { - current->delays->thrashing_start = ktime_get_ns(); + current->delays->thrashing_start = local_clock(); } void __delayacct_thrashing_end(void) diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 5b5b6c7ec7f2..794e76b03b34 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -111,7 +111,7 @@ static int dma_assign_coherent_memory(struct device *dev, * Declare a region of memory to be handed out by dma_alloc_coherent() when it * is asked for coherent memory for this device. This shall only be used * from platform code, usually based on the device tree description. - * + * * phys_addr is the CPU physical address to which the memory is currently * assigned (this will be ioremapped so the CPU can access the region). * diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 14de1271463f..dadae6255d05 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -566,11 +566,9 @@ static void add_dma_entry(struct dma_debug_entry *entry) if (rc == -ENOMEM) { pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; + } else if (rc == -EEXIST) { + pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n"); } - - /* TODO: report -EEXIST errors here as overlapping mappings are - * not supported by the DMA API - */ } static int dma_debug_create_entries(gfp_t gfp) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 8ca7d505d61c..e50df8d8f87e 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -335,6 +335,14 @@ void __init swiotlb_exit(void) } /* + * Return the offset into a iotlb slot required to keep the device happy. + */ +static unsigned int swiotlb_align_offset(struct device *dev, u64 addr) +{ + return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); +} + +/* * Bounce: copy the swiotlb buffer from or back to the original dma location */ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, @@ -346,10 +354,17 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size size_t alloc_size = mem->slots[index].alloc_size; unsigned long pfn = PFN_DOWN(orig_addr); unsigned char *vaddr = phys_to_virt(tlb_addr); + unsigned int tlb_offset; if (orig_addr == INVALID_PHYS_ADDR) return; + tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) - + swiotlb_align_offset(dev, orig_addr); + + orig_addr += tlb_offset; + alloc_size -= tlb_offset; + if (size > alloc_size) { dev_WARN_ONCE(dev, 1, "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n", @@ -391,14 +406,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size #define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT)) /* - * Return the offset into a iotlb slot required to keep the device happy. 
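Stepping back to the delayacct conversion a little further up: the point of DEFINE_STATIC_KEY_FALSE(delayacct_key), the "delayacct" boot parameter and the new sysctl handler is that the accounting hooks cost a single patched-out branch while delay accounting is off. A minimal sketch of the guard pattern the inline wrappers can use (illustration only, not a copy of the real include/linux/delayacct.h):

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(delayacct_key);
void __delayacct_blkio_start(void);

static inline void delayacct_blkio_start_sketch(void)
{
	/* Compiles to a NOP until set_delayacct(true) enables the key. */
	if (static_branch_unlikely(&delayacct_key))
		__delayacct_blkio_start();
}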
- */ -static unsigned int swiotlb_align_offset(struct device *dev, u64 addr) -{ - return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); -} - -/* * Carefully handle integer overflow which can occur when boundary_mask == ~0UL. */ static inline unsigned long get_max_slots(unsigned long boundary_mask) diff --git a/kernel/entry/common.c b/kernel/entry/common.c index a0b3b04fb596..bf16395b9e13 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -5,6 +5,7 @@ #include <linux/highmem.h> #include <linux/livepatch.h> #include <linux/audit.h> +#include <linux/tick.h> #include "common.h" @@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, local_irq_disable_exit_to_user(); /* Check if any of the above work has queued a deferred wakeup */ - rcu_nocb_flush_deferred_wakeup(); + tick_nohz_user_enter_prepare(); ti_work = READ_ONCE(current_thread_info()->flags); } @@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs) lockdep_assert_irqs_disabled(); /* Flush pending rcuog wakeup before the last need_resched() check */ - rcu_nocb_flush_deferred_wakeup(); + tick_nohz_user_enter_prepare(); if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) ti_work = exit_to_user_mode_loop(regs, ti_work); diff --git a/kernel/events/core.c b/kernel/events/core.c index 6fee4a7e88d7..464917096e73 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -132,6 +132,7 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) /** * cpu_function_call - call a function on the cpu + * @cpu: target cpu to queue this function * @func: the function to be called * @info: the function call argument * @@ -3821,9 +3822,16 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, struct task_struct *task) { struct perf_cpu_context *cpuctx; - struct pmu *pmu = ctx->pmu; + struct pmu *pmu; cpuctx = __get_cpu_context(ctx); + + /* + * HACK: for HETEROGENEOUS the task context might have switched to a + * different PMU, force (re)set the context, + */ + pmu = ctx->pmu = cpuctx->ctx.pmu; + if (cpuctx->task_ctx == ctx) { if (cpuctx->sched_cb_usage) __perf_pmu_sched_task(cpuctx, true); @@ -4609,7 +4617,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task, cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); ctx = &cpuctx->ctx; get_ctx(ctx); + raw_spin_lock_irqsave(&ctx->lock, flags); ++ctx->pin_count; + raw_spin_unlock_irqrestore(&ctx->lock, flags); return ctx; } @@ -6667,10 +6677,10 @@ out: return data->aux_size; } -long perf_pmu_snapshot_aux(struct perf_buffer *rb, - struct perf_event *event, - struct perf_output_handle *handle, - unsigned long size) +static long perf_pmu_snapshot_aux(struct perf_buffer *rb, + struct perf_event *event, + struct perf_output_handle *handle, + unsigned long size) { unsigned long flags; long ret; @@ -8299,8 +8309,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) if (vma->vm_flags & VM_DENYWRITE) flags |= MAP_DENYWRITE; - if (vma->vm_flags & VM_MAYEXEC) - flags |= MAP_EXECUTABLE; if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; if (is_vm_hugetlb_page(vma)) @@ -8680,13 +8688,12 @@ static void perf_event_switch(struct task_struct *task, }, }; - if (!sched_in && task->state == TASK_RUNNING) + if (!sched_in && task->on_rq) { switch_event.event_id.header.misc |= PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; + } - perf_iterate_sb(perf_event_switch_output, - &switch_event, - NULL); + perf_iterate_sb(perf_event_switch_output, &switch_event, NULL); } /* @@ -11917,6 +11924,7 @@ again: 
* @pid: target pid * @cpu: target cpu * @group_fd: group leader event fd + * @flags: perf event open flags */ SYSCALL_DEFINE5(perf_event_open, struct perf_event_attr __user *, attr_uptr, @@ -12373,6 +12381,8 @@ err_fd: * @attr: attributes of the counter to create * @cpu: cpu in which the counter is bound * @task: task to profile (NULL for percpu) + * @overflow_handler: callback to trigger when we hit the event + * @context: context data could be used in overflow_handler callback */ struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index b48d7039a015..835973444a1e 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -451,6 +451,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp) * register_user_hw_breakpoint - register a hardware breakpoint for user space * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint + * @context: context data could be used in the triggered callback * @tsk: pointer to 'task_struct' of the process to which the address belongs */ struct perf_event * @@ -550,6 +551,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); * register_wide_hw_breakpoint - register a wide breakpoint in the kernel * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint + * @context: context data could be used in the triggered callback * * @return a set of per_cpu pointers to perf events */ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6addc9780319..af24dc3febbe 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -453,6 +453,7 @@ static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, * that have fixed length instructions. * * uprobe_write_opcode - write the opcode at a given virtual address. + * @auprobe: arch specific probepoint information. * @mm: the probed process address space. * @vaddr: the virtual address to store the opcode. * @opcode: opcode to be written at @vaddr. @@ -2046,8 +2047,8 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) struct vm_area_struct *vma; mmap_read_lock(mm); - vma = find_vma(mm, bp_vaddr); - if (vma && vma->vm_start <= bp_vaddr) { + vma = vma_lookup(mm, bp_vaddr); + if (vma) { if (valid_vma(vma, false)) { struct inode *inode = file_inode(vma->vm_file); loff_t offset = vaddr_to_offset(vma, bp_vaddr); diff --git a/kernel/exit.c b/kernel/exit.c index fd1c04193e18..9a89e7f36acb 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -162,7 +162,6 @@ static void __exit_signal(struct task_struct *tsk) flush_sigqueue(&sig->shared_pending); tty_kref_put(tty); } - exit_task_sigqueue_cache(tsk); } static void delayed_put_task_struct(struct rcu_head *rhp) @@ -189,7 +188,7 @@ repeat: /* don't need to get the RCU readlock here - the process is dead and * can't be modifying its own credentials. 
But shut RCU-lockdep up */ rcu_read_lock(); - atomic_dec(&__task_cred(p)->user->processes); + dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); rcu_read_unlock(); cgroup_release(p); diff --git a/kernel/fork.c b/kernel/fork.c index dc06afd725cb..bc94b2cc5995 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -425,7 +425,7 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk) static void release_task_stack(struct task_struct *tsk) { - if (WARN_ON(tsk->state != TASK_DEAD)) + if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD)) return; /* Better to leak the stack than to free prematurely */ account_kernel_stack(tsk, -1); @@ -742,6 +742,7 @@ void __put_task_struct(struct task_struct *tsk) exit_creds(tsk); delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); + sched_core_free(tsk); if (!profile_handoff_task(tsk)) free_task(tsk); @@ -824,9 +825,14 @@ void __init fork_init(void) init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; - for (i = 0; i < UCOUNT_COUNTS; i++) + for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2; + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK)); + #ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", NULL, free_vm_stack_cache); @@ -1029,7 +1035,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; - atomic_set(&mm->has_pinned, 0); atomic64_set(&mm->pinned_vm, 0); memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); @@ -1977,8 +1982,7 @@ static __latent_entropy struct task_struct *copy_process( DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; - if (atomic_read(&p->real_cred->user->processes) >= - task_rlimit(p, RLIMIT_NPROC)) { + if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { if (p->real_cred->user != INIT_USER && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) goto bad_fork_free; @@ -1999,7 +2003,7 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ - p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); + p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); @@ -2008,7 +2012,6 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); - p->sigqueue_cache = NULL; p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME @@ -2250,6 +2253,8 @@ static __latent_entropy struct task_struct *copy_process( klp_copy_process(p); + sched_core_fork(p); + spin_lock(¤t->sighand->siglock); /* @@ -2337,6 +2342,7 @@ static __latent_entropy struct task_struct *copy_process( return p; bad_fork_cancel_cgroup: + sched_core_free(p); spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); cgroup_cancel_fork(p, args); @@ -2385,10 +2391,10 @@ bad_fork_cleanup_threadgroup_lock: #endif delayacct_tsk_free(p); 
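The uprobes hunk earlier in this diff swaps an open-coded find_vma() plus lower-bound check for vma_lookup(), which only returns a VMA that actually contains the address. A small sketch of the equivalence (the helper name is made up for illustration):

#include <linux/mm.h>

static struct vm_area_struct *containing_vma(struct mm_struct *mm,
					     unsigned long addr)
{
	/* Old idiom: find_vma() returns the first VMA ending above addr,
	 * so the caller had to rule out a gap below it ... */
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && addr < vma->vm_start)
		vma = NULL;

	/* ... which is exactly what vma_lookup(mm, addr) expresses. */
	return vma;
}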
bad_fork_cleanup_count: - atomic_dec(&p->cred->user->processes); + dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); exit_creds(p); bad_fork_free: - p->state = TASK_DEAD; + WRITE_ONCE(p->__state, TASK_DEAD); put_task_stack(p); delayed_free_task(p); fork_out: @@ -2408,7 +2414,7 @@ static inline void init_idle_pids(struct task_struct *idle) } } -struct task_struct *fork_idle(int cpu) +struct task_struct * __init fork_idle(int cpu) { struct task_struct *task; struct kernel_clone_args args = { @@ -2998,6 +3004,12 @@ int ksys_unshare(unsigned long unshare_flags) if (err) goto bad_unshare_cleanup_cred; + if (new_cred) { + err = set_cred_ucounts(new_cred); + if (err) + goto bad_unshare_cleanup_cred; + } + if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { if (do_sysvsem) { /* diff --git a/kernel/freezer.c b/kernel/freezer.c index dc520f01f99d..45ab36ffd0e7 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -58,7 +58,7 @@ bool __refrigerator(bool check_kthr_stop) /* Hmm, should we be allowed to suspend when there are realtime processes around? */ bool was_frozen = false; - long save = current->state; + unsigned int save = get_current_state(); pr_debug("%s entered refrigerator\n", current->comm); diff --git a/kernel/futex.c b/kernel/futex.c index 4938a00bc785..2ecb07575055 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -35,7 +35,6 @@ #include <linux/jhash.h> #include <linux/pagemap.h> #include <linux/syscalls.h> -#include <linux/hugetlb.h> #include <linux/freezer.h> #include <linux/memblock.h> #include <linux/fault-inject.h> @@ -650,7 +649,7 @@ again: key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.i_seq = get_inode_sequence_number(inode); - key->shared.pgoff = basepage_index(tail); + key->shared.pgoff = page_to_pgoff(tail); rcu_read_unlock(); } @@ -1728,12 +1727,9 @@ retry_private: return ret; } - if (!(flags & FLAGS_SHARED)) { - cond_resched(); - goto retry_private; - } - cond_resched(); + if (!(flags & FLAGS_SHARED)) + goto retry_private; goto retry; } @@ -1874,7 +1870,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, * If the caller intends to requeue more than 1 waiter to pifutex, * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, * as we have means to handle the possible fault. If not, don't set - * the bit unecessarily as it will force the subsequent unlock to enter + * the bit unnecessarily as it will force the subsequent unlock to enter * the kernel. */ top_waiter = futex_top_waiter(hb1, key1); @@ -2103,7 +2099,7 @@ retry_private: continue; /* - * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always + * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops. * * We should never be requeueing a futex_q with a pi_state, @@ -2318,7 +2314,7 @@ retry: } /* - * PI futexes can not be requeued and must remove themself from the + * PI futexes can not be requeued and must remove themselves from the * hash bucket. The hash bucket lock (i.e. lock_ptr) is held. 
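The futex.c hunks that continue below add a FUTEX_LOCK_PI2 command: it behaves like FUTEX_LOCK_PI, but its absolute timeout is measured against CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is also passed (plain FUTEX_LOCK_PI keeps its historical CLOCK_REALTIME behaviour, which is why do_futex() forces FLAGS_CLOCKRT for it). A hedged userspace sketch; the fallback #define is an assumption for uapi headers that predate the new command:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <time.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2	13	/* assumed value; verify against your headers */
#endif

/* Acquire a PI futex with an absolute CLOCK_MONOTONIC deadline. */
static long pi_lock_monotonic(uint32_t *uaddr, const struct timespec *deadline)
{
	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, deadline, NULL, 0);
}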
*/ static void unqueue_me_pi(struct futex_q *q) @@ -2786,7 +2782,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, if (refill_pi_state_cache()) return -ENOMEM; - to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0); + to = futex_setup_timer(time, &timeout, flags, 0); retry: ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); @@ -2903,7 +2899,7 @@ no_block: */ res = fixup_owner(uaddr, &q, !ret); /* - * If fixup_owner() returned an error, proprogate that. If it acquired + * If fixup_owner() returned an error, propagate that. If it acquired * the lock, clear our -ETIMEDOUT or -EINTR. */ if (res) @@ -3280,7 +3276,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, */ res = fixup_owner(uaddr2, &q, !ret); /* - * If fixup_owner() returned an error, proprogate that. If it + * If fixup_owner() returned an error, propagate that. If it * acquired the lock, clear -ETIMEDOUT or -EINTR. */ if (res) @@ -3678,7 +3674,7 @@ void futex_exec_release(struct task_struct *tsk) { /* * The state handling is done for consistency, but in the case of - * exec() there is no way to prevent futher damage as the PID stays + * exec() there is no way to prevent further damage as the PID stays * the same. But for the unlikely and arguably buggy case that a * futex is held on exec(), this provides at least as much state * consistency protection which is possible. @@ -3710,12 +3706,14 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, if (op & FUTEX_CLOCK_REALTIME) { flags |= FLAGS_CLOCKRT; - if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) + if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI && + cmd != FUTEX_LOCK_PI2) return -ENOSYS; } switch (cmd) { case FUTEX_LOCK_PI: + case FUTEX_LOCK_PI2: case FUTEX_UNLOCK_PI: case FUTEX_TRYLOCK_PI: case FUTEX_WAIT_REQUEUE_PI: @@ -3742,6 +3740,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, case FUTEX_WAKE_OP: return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); case FUTEX_LOCK_PI: + flags |= FLAGS_CLOCKRT; + fallthrough; + case FUTEX_LOCK_PI2: return futex_lock_pi(uaddr, flags, timeout, 0); case FUTEX_UNLOCK_PI: return futex_unlock_pi(uaddr, flags); @@ -3762,6 +3763,7 @@ static __always_inline bool futex_cmd_has_timeout(u32 cmd) switch (cmd) { case FUTEX_WAIT: case FUTEX_LOCK_PI: + case FUTEX_LOCK_PI2: case FUTEX_WAIT_BITSET: case FUTEX_WAIT_REQUEUE_PI: return true; diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 58f87a3092f3..053447183ac5 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -5,6 +5,7 @@ config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS depends on !CC_IS_CLANG || CLANG_VERSION >= 110000 + depends on !ARCH_WANTS_NO_INSTR || CC_HAS_NO_PROFILE_FN_ATTR select CONSTRUCTORS default n help diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 396ebaebea3f..9888e2bc8c76 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -15,6 +15,7 @@ #include <linux/kthread.h> #include <linux/lockdep.h> #include <linux/export.h> +#include <linux/panic_notifier.h> #include <linux/sysctl.h> #include <linux/suspend.h> #include <linux/utsname.h> @@ -196,7 +197,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) last_break = jiffies; } /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ - if (t->state == TASK_UNINTERRUPTIBLE) + if (READ_ONCE(t->__state) == TASK_UNINTERRUPTIBLE) check_hung_task(t, timeout); } unlock: diff --git a/kernel/irq/Kconfig 
b/kernel/irq/Kconfig index d79ef2493a28..fbc54c2a7f23 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -70,6 +70,11 @@ config IRQ_DOMAIN_HIERARCHY bool select IRQ_DOMAIN +# Support for obsolete non-mapping irq domains +config IRQ_DOMAIN_NOMAP + bool + select IRQ_DOMAIN + # Support for hierarchical fasteoi+edge and fasteoi+level handlers config IRQ_FASTEOI_HIERARCHY_HANDLERS bool diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 8cc8e5713287..7f04c7d8296e 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -481,7 +481,7 @@ void handle_nested_irq(unsigned int irq) for_each_action_of_desc(desc, action) action_ret |= action->thread_fn(action->irq, action->dev_id); - if (!noirqdebug) + if (!irq_settings_no_debug(desc)) note_interrupt(desc, action_ret); raw_spin_lock_irq(&desc->lock); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 762a928e18f9..221d80c31e94 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -197,7 +197,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) add_interrupt_randomness(desc->irq_data.irq, flags); - if (!noirqdebug) + if (!irq_settings_no_debug(desc)) note_interrupt(desc, retval); return retval; } diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4a617d7312a4..f4dd5186858a 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -632,14 +632,8 @@ void irq_init_desc(unsigned int irq) #endif /* !CONFIG_SPARSE_IRQ */ -/** - * generic_handle_irq - Invoke the handler for a particular irq - * @irq: The irq number to handle - * - */ -int generic_handle_irq(unsigned int irq) +int handle_irq_desc(struct irq_desc *desc) { - struct irq_desc *desc = irq_to_desc(irq); struct irq_data *data; if (!desc) @@ -652,11 +646,40 @@ int generic_handle_irq(unsigned int irq) generic_handle_irq_desc(desc); return 0; } +EXPORT_SYMBOL_GPL(handle_irq_desc); + +/** + * generic_handle_irq - Invoke the handler for a particular irq + * @irq: The irq number to handle + * + */ +int generic_handle_irq(unsigned int irq) +{ + return handle_irq_desc(irq_to_desc(irq)); +} EXPORT_SYMBOL_GPL(generic_handle_irq); +#ifdef CONFIG_IRQ_DOMAIN +/** + * generic_handle_domain_irq - Invoke the handler for a HW irq belonging + * to a domain, usually for a non-root interrupt + * controller + * @domain: The domain where to perform the lookup + * @hwirq: The HW irq number to convert to a logical one + * + * Returns: 0 on success, or -EINVAL if conversion has failed + * + */ +int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) +{ + return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); +} +EXPORT_SYMBOL_GPL(generic_handle_domain_irq); + #ifdef CONFIG_HANDLE_DOMAIN_IRQ /** - * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain + * handle_domain_irq - Invoke the handler for a HW irq belonging to a domain, + * usually for a root interrupt controller * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * @lookup: Whether to perform the domain lookup or not @@ -664,37 +687,27 @@ EXPORT_SYMBOL_GPL(generic_handle_irq); * * Returns: 0 on success, or -EINVAL if conversion has failed */ -int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, - bool lookup, struct pt_regs *regs) +int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - unsigned int irq = hwirq; + struct irq_desc *desc; int ret = 0; irq_enter(); -#ifdef CONFIG_IRQ_DOMAIN - if (lookup) 
- irq = irq_find_mapping(domain, hwirq); -#endif - - /* - * Some hardware gives randomly wrong interrupts. Rather - * than crashing, do something sensible. - */ - if (unlikely(!irq || irq >= nr_irqs)) { - ack_bad_irq(irq); + /* The irqdomain code provides boundary checks */ + desc = irq_resolve_mapping(domain, hwirq); + if (likely(desc)) + handle_irq_desc(desc); + else ret = -EINVAL; - } else { - generic_handle_irq(irq); - } irq_exit(); set_irq_regs(old_regs); return ret; } -#ifdef CONFIG_IRQ_DOMAIN /** * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain * @domain: The domain where to perform the lookup @@ -709,7 +722,7 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - unsigned int irq; + struct irq_desc *desc; int ret = 0; /* @@ -717,14 +730,14 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, */ WARN_ON(!in_nmi()); - irq = irq_find_mapping(domain, hwirq); + desc = irq_resolve_mapping(domain, hwirq); /* * ack_bad_irq is not NMI-safe, just report * an invalid interrupt. */ - if (likely(irq)) - generic_handle_irq(irq); + if (likely(desc)) + handle_irq_desc(desc); else ret = -EINVAL; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 6284443b87ec..51c483ce2447 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -146,7 +146,11 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, static atomic_t unknown_domains; - domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), + if (WARN_ON((size && direct_max) || + (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && direct_max))) + return NULL; + + domain = kzalloc_node(struct_size(domain, revmap, size), GFP_KERNEL, of_node_to_nid(to_of_node(fwnode))); if (!domain) return NULL; @@ -209,12 +213,18 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, /* Fill structure */ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); - mutex_init(&domain->revmap_tree_mutex); + mutex_init(&domain->revmap_mutex); domain->ops = ops; domain->host_data = host_data; domain->hwirq_max = hwirq_max; + + if (direct_max) { + size = direct_max; + domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP; + } + domain->revmap_size = size; - domain->revmap_direct_max_irq = direct_max; + irq_domain_check_hierarchy(domain); mutex_lock(&irq_domain_mutex); @@ -482,29 +492,39 @@ struct irq_domain *irq_get_default_host(void) return irq_default_domain; } +static bool irq_domain_is_nomap(struct irq_domain *domain) +{ + return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && + (domain->flags & IRQ_DOMAIN_FLAG_NO_MAP); +} + static void irq_domain_clear_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { - if (hwirq < domain->revmap_size) { - domain->linear_revmap[hwirq] = 0; - } else { - mutex_lock(&domain->revmap_tree_mutex); + if (irq_domain_is_nomap(domain)) + return; + + mutex_lock(&domain->revmap_mutex); + if (hwirq < domain->revmap_size) + rcu_assign_pointer(domain->revmap[hwirq], NULL); + else radix_tree_delete(&domain->revmap_tree, hwirq); - mutex_unlock(&domain->revmap_tree_mutex); - } + mutex_unlock(&domain->revmap_mutex); } static void irq_domain_set_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, struct irq_data *irq_data) { - if (hwirq < domain->revmap_size) { - domain->linear_revmap[hwirq] = irq_data->irq; - } else { - mutex_lock(&domain->revmap_tree_mutex); + if (irq_domain_is_nomap(domain)) + return; + + mutex_lock(&domain->revmap_mutex); + if (hwirq < 
domain->revmap_size) + rcu_assign_pointer(domain->revmap[hwirq], irq_data); + else radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); - mutex_unlock(&domain->revmap_tree_mutex); - } + mutex_unlock(&domain->revmap_mutex); } static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) @@ -604,6 +624,7 @@ void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, } EXPORT_SYMBOL_GPL(irq_domain_associate_many); +#ifdef CONFIG_IRQ_DOMAIN_NOMAP /** * irq_create_direct_mapping() - Allocate an irq for direct mapping * @domain: domain to allocate the irq for or NULL for default domain @@ -628,9 +649,9 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) pr_debug("create_direct virq allocation failed\n"); return 0; } - if (virq >= domain->revmap_direct_max_irq) { + if (virq >= domain->revmap_size) { pr_err("ERROR: no free irqs available below %i maximum\n", - domain->revmap_direct_max_irq); + domain->revmap_size); irq_free_desc(virq); return 0; } @@ -644,6 +665,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) return virq; } EXPORT_SYMBOL_GPL(irq_create_direct_mapping); +#endif /** * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space @@ -862,37 +884,53 @@ void irq_dispose_mapping(unsigned int virq) EXPORT_SYMBOL_GPL(irq_dispose_mapping); /** - * irq_find_mapping() - Find a linux irq from a hw irq number. + * __irq_resolve_mapping() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space + * @irq: optional pointer to return the Linux irq if required + * + * Returns the interrupt descriptor. */ -unsigned int irq_find_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) +struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq, + unsigned int *irq) { + struct irq_desc *desc = NULL; struct irq_data *data; /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) - return 0; + return desc; + + if (irq_domain_is_nomap(domain)) { + if (hwirq < domain->revmap_size) { + data = irq_domain_get_irq_data(domain, hwirq); + if (data && data->hwirq == hwirq) + desc = irq_data_to_desc(data); + } - if (hwirq < domain->revmap_direct_max_irq) { - data = irq_domain_get_irq_data(domain, hwirq); - if (data && data->hwirq == hwirq) - return hwirq; + return desc; } + rcu_read_lock(); /* Check if the hwirq is in the linear revmap. */ if (hwirq < domain->revmap_size) - return domain->linear_revmap[hwirq]; + data = rcu_dereference(domain->revmap[hwirq]); + else + data = radix_tree_lookup(&domain->revmap_tree, hwirq); + + if (likely(data)) { + desc = irq_data_to_desc(data); + if (irq) + *irq = data->irq; + } - rcu_read_lock(); - data = radix_tree_lookup(&domain->revmap_tree, hwirq); rcu_read_unlock(); - return data ? data->irq : 0; + return desc; } -EXPORT_SYMBOL_GPL(irq_find_mapping); +EXPORT_SYMBOL_GPL(__irq_resolve_mapping); /** * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings @@ -1468,15 +1506,20 @@ static void irq_domain_fix_revmap(struct irq_data *d) { void __rcu **slot; - if (d->hwirq < d->domain->revmap_size) - return; /* Not using radix tree. */ + if (irq_domain_is_nomap(d->domain)) + return; /* Fix up the revmap. 
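For illustration of the new generic_handle_domain_irq() helper added in the irqdesc.c hunk above: a demultiplexing irqchip driver can now hand a domain plus hardware interrupt number straight to the core instead of doing irq_find_mapping() followed by generic_handle_irq(). A sketch under assumed driver structures (demo_mux and its status register are hypothetical):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

struct demo_mux {			/* hypothetical driver state */
	struct irq_domain *domain;
	void __iomem *status;
};

/* Installed with irq_set_chained_handler_and_data(parent_irq, ..., mux). */
static void demo_mux_handle(struct irq_desc *desc)
{
	struct demo_mux *mux = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int hwirq;

	chained_irq_enter(chip, desc);

	pending = readl(mux->status);
	for_each_set_bit(hwirq, &pending, 32)
		/* Resolves hwirq within the domain and runs its flow handler. */
		generic_handle_domain_irq(mux->domain, hwirq);

	chained_irq_exit(chip, desc);
}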
*/ - mutex_lock(&d->domain->revmap_tree_mutex); - slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); - if (slot) - radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); - mutex_unlock(&d->domain->revmap_tree_mutex); + mutex_lock(&d->domain->revmap_mutex); + if (d->hwirq < d->domain->revmap_size) { + /* Not using radix tree */ + rcu_assign_pointer(d->domain->revmap[d->hwirq], d); + } else { + slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); + if (slot) + radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); + } + mutex_unlock(&d->domain->revmap_mutex); } /** @@ -1828,8 +1871,7 @@ static void irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind) { seq_printf(m, "%*sname: %s\n", ind, "", d->name); - seq_printf(m, "%*ssize: %u\n", ind + 1, "", - d->revmap_size + d->revmap_direct_max_irq); + seq_printf(m, "%*ssize: %u\n", ind + 1, "", d->revmap_size); seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount); seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags); if (d->ops && d->ops->debug_show) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 4c14356543d9..ef30b4762947 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -441,7 +441,8 @@ out_unlock: return ret; } -int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) +static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, + bool force) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; @@ -456,6 +457,36 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) return ret; } +/** + * irq_set_affinity - Set the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Fails if cpumask does not contain an online CPU + */ +int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, false); +} +EXPORT_SYMBOL_GPL(irq_set_affinity); + +/** + * irq_force_affinity - Force the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Same as irq_set_affinity, but without checking the mask against + * online cpus. + * + * Solely for low level cpu hotplug code, where we need to make per + * cpu interrupts affine before the cpu becomes online. 
+ */ +int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, true); +} +EXPORT_SYMBOL_GPL(irq_force_affinity); + int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) { unsigned long flags; @@ -1686,8 +1717,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (new->flags & IRQF_PERCPU) { irqd_set(&desc->irq_data, IRQD_PER_CPU); irq_settings_set_per_cpu(desc); + if (new->flags & IRQF_NO_DEBUG) + irq_settings_set_no_debug(desc); } + if (noirqdebug) + irq_settings_set_no_debug(desc); + if (new->flags & IRQF_ONESHOT) desc->istate |= IRQS_ONESHOT; diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 403378b9947b..7b7efb1a114b 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -18,6 +18,7 @@ enum { _IRQ_IS_POLLED = IRQ_IS_POLLED, _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, _IRQ_HIDDEN = IRQ_HIDDEN, + _IRQ_NO_DEBUG = IRQ_NO_DEBUG, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -33,6 +34,7 @@ enum { #define IRQ_IS_POLLED GOT_YOU_MORON #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON #define IRQ_HIDDEN GOT_YOU_MORON +#define IRQ_NO_DEBUG GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -174,3 +176,13 @@ static inline bool irq_settings_is_hidden(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_HIDDEN; } + +static inline void irq_settings_set_no_debug(struct irq_desc *desc) +{ + desc->status_use_accessors |= _IRQ_NO_DEBUG; +} + +static inline bool irq_settings_no_debug(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_NO_DEBUG; +} diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 23a7a0ba1388..db8c248ebc8c 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work) if (!irq_work_claim(work)) return false; - /*record irq_work call stack in order to print it in KASAN reports*/ - kasan_record_aux_stack(work); - /* Queue the entry and raise the IPI if needed. 
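The manage.c hunk above turns irq_set_affinity() and irq_force_affinity() into exported out-of-line functions (with __irq_set_affinity() becoming static), so modular drivers can steer their interrupts without extra wrappers. A short sketch with hypothetical driver names:

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/device.h>

struct demo_queue {			/* hypothetical per-queue state */
	struct device *dev;
	unsigned int irq;
	unsigned int cpu;
};

static irqreturn_t demo_queue_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_queue_irq(struct demo_queue *q)
{
	int ret;

	ret = request_irq(q->irq, demo_queue_isr, 0, "demo-queue", q);
	if (ret)
		return ret;

	/* Best effort: bind this queue's interrupt to the CPU that owns it. */
	ret = irq_set_affinity(q->irq, cpumask_of(q->cpu));
	if (ret)
		dev_warn(q->dev, "failed to set IRQ affinity: %d\n", ret);

	return 0;
}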
*/ preempt_disable(); __irq_work_queue_local(work); diff --git a/kernel/jump_label.c b/kernel/jump_label.c index ba39fbb1f8e7..bdb0681bece8 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit); static int addr_conflict(struct jump_entry *entry, void *start, void *end) { if (jump_entry_code(entry) <= (unsigned long)end && - jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start) + jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start) return 1; return 0; @@ -483,13 +483,14 @@ void __init jump_label_init(void) for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; + bool in_init; /* rewrite NOPs */ if (jump_label_type(iter) == JUMP_LABEL_NOP) arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); - if (init_section_contains((void *)jump_entry_code(iter), 1)) - jump_entry_set_init(iter); + in_init = init_section_contains((void *)jump_entry_code(iter), 1); + jump_entry_set_init(iter, in_init); iterk = jump_entry_key(iter); if (iterk == key) @@ -634,9 +635,10 @@ static int jump_label_add_module(struct module *mod) for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; + bool in_init; - if (within_module_init(jump_entry_code(iter), mod)) - jump_entry_set_init(iter); + in_init = within_module_init(jump_entry_code(iter), mod); + jump_entry_set_init(iter, in_init); iterk = jump_entry_key(iter); if (iterk == key) diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 45c821d4e8bd..26709ea65c71 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -380,9 +380,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, if (consumed) { kcsan_save_irqtrace(current); - kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE, - KCSAN_REPORT_CONSUMED_WATCHPOINT, - watchpoint - watchpoints); + kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints); kcsan_restore_irqtrace(current); } else { /* @@ -407,12 +405,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0; atomic_long_t *watchpoint; - union { - u8 _1; - u16 _2; - u32 _4; - u64 _8; - } expect_value; + u64 old, new, diff; unsigned long access_mask; enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE; unsigned long ua_flags = user_access_save(); @@ -468,19 +461,19 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * Read the current value, to later check and infer a race if the data * was modified via a non-instrumented access, e.g. from a device. */ - expect_value._8 = 0; + old = 0; switch (size) { case 1: - expect_value._1 = READ_ONCE(*(const u8 *)ptr); + old = READ_ONCE(*(const u8 *)ptr); break; case 2: - expect_value._2 = READ_ONCE(*(const u16 *)ptr); + old = READ_ONCE(*(const u16 *)ptr); break; case 4: - expect_value._4 = READ_ONCE(*(const u32 *)ptr); + old = READ_ONCE(*(const u32 *)ptr); break; case 8: - expect_value._8 = READ_ONCE(*(const u64 *)ptr); + old = READ_ONCE(*(const u64 *)ptr); break; default: break; /* ignore; we do not diff the values */ @@ -506,33 +499,30 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) * racy access. 
*/ access_mask = get_ctx()->access_mask; + new = 0; switch (size) { case 1: - expect_value._1 ^= READ_ONCE(*(const u8 *)ptr); - if (access_mask) - expect_value._1 &= (u8)access_mask; + new = READ_ONCE(*(const u8 *)ptr); break; case 2: - expect_value._2 ^= READ_ONCE(*(const u16 *)ptr); - if (access_mask) - expect_value._2 &= (u16)access_mask; + new = READ_ONCE(*(const u16 *)ptr); break; case 4: - expect_value._4 ^= READ_ONCE(*(const u32 *)ptr); - if (access_mask) - expect_value._4 &= (u32)access_mask; + new = READ_ONCE(*(const u32 *)ptr); break; case 8: - expect_value._8 ^= READ_ONCE(*(const u64 *)ptr); - if (access_mask) - expect_value._8 &= (u64)access_mask; + new = READ_ONCE(*(const u64 *)ptr); break; default: break; /* ignore; we do not diff the values */ } + diff = old ^ new; + if (access_mask) + diff &= access_mask; + /* Were we able to observe a value-change? */ - if (expect_value._8 != 0) + if (diff != 0) value_change = KCSAN_VALUE_CHANGE_TRUE; /* Check if this access raced with another. */ @@ -566,8 +556,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE) atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]); - kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL, - watchpoint - watchpoints); + kcsan_report_known_origin(ptr, size, type, value_change, + watchpoint - watchpoints, + old, new, access_mask); } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) { /* Inferring a race, since the value should not have changed. */ @@ -576,9 +567,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]); if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) - kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE, - KCSAN_REPORT_RACE_UNKNOWN_ORIGIN, - watchpoint - watchpoints); + kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask); } /* diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 9881099d4179..f36e25c497ed 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -116,30 +116,27 @@ enum kcsan_value_change { KCSAN_VALUE_CHANGE_TRUE, }; -enum kcsan_report_type { - /* - * The thread that set up the watchpoint and briefly stalled was - * signalled that another thread triggered the watchpoint. - */ - KCSAN_REPORT_RACE_SIGNAL, - - /* - * A thread found and consumed a matching watchpoint. - */ - KCSAN_REPORT_CONSUMED_WATCHPOINT, +/* + * The calling thread hit and consumed a watchpoint: set the access information + * to be consumed by the reporting thread. No report is printed yet. + */ +void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type, + int watchpoint_idx); - /* - * No other thread was observed to race with the access, but the data - * value before and after the stall differs. - */ - KCSAN_REPORT_RACE_UNKNOWN_ORIGIN, -}; +/* + * The calling thread observed that the watchpoint it set up was hit and + * consumed: print the full report based on information set by the racing + * thread. + */ +void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type, + enum kcsan_value_change value_change, int watchpoint_idx, + u64 old, u64 new, u64 mask); /* - * Print a race report from thread that encountered the race. + * No other thread was observed to race with the access, but the data value + * before and after the stall differs. Reports a race of "unknown origin". 
*/ -extern void kcsan_report(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, - enum kcsan_report_type type, int watchpoint_idx); +void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type, + u64 old, u64 new, u64 mask); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 13dce3c664d6..21137929d428 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -325,13 +325,10 @@ static void print_verbose_info(struct task_struct *task) print_irqtrace_events(task); } -/* - * Returns true if a report was generated, false otherwise. - */ -static bool print_report(enum kcsan_value_change value_change, - enum kcsan_report_type type, +static void print_report(enum kcsan_value_change value_change, const struct access_info *ai, - const struct other_info *other_info) + const struct other_info *other_info, + u64 old, u64 new, u64 mask) { unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); @@ -344,25 +341,24 @@ static bool print_report(enum kcsan_value_change value_change, * Must check report filter rules before starting to print. */ if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr])) - return false; + return; - if (type == KCSAN_REPORT_RACE_SIGNAL) { + if (other_info) { other_skipnr = get_stack_skipnr(other_info->stack_entries, other_info->num_stack_entries); other_frame = other_info->stack_entries[other_skipnr]; /* @value_change is only known for the other thread */ if (skip_report(value_change, other_frame)) - return false; + return; } if (rate_limit_report(this_frame, other_frame)) - return false; + return; /* Print report header. */ pr_err("==================================================================\n"); - switch (type) { - case KCSAN_REPORT_RACE_SIGNAL: { + if (other_info) { int cmp; /* @@ -374,22 +370,15 @@ static bool print_report(enum kcsan_value_change value_change, get_bug_type(ai->access_type | other_info->ai.access_type), (void *)(cmp < 0 ? other_frame : this_frame), (void *)(cmp < 0 ? this_frame : other_frame)); - } break; - - case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: + } else { pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type), (void *)this_frame); - break; - - default: - BUG(); } pr_err("\n"); /* Print information about the racing accesses. */ - switch (type) { - case KCSAN_REPORT_RACE_SIGNAL: + if (other_info) { pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", get_access_type(other_info->ai.access_type), other_info->ai.ptr, other_info->ai.size, get_thread_desc(other_info->ai.task_pid), @@ -407,16 +396,10 @@ static bool print_report(enum kcsan_value_change value_change, pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n", get_access_type(ai->access_type), ai->ptr, ai->size, get_thread_desc(ai->task_pid), ai->cpu_id); - break; - - case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN: + } else { pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n", get_access_type(ai->access_type), ai->ptr, ai->size, get_thread_desc(ai->task_pid), ai->cpu_id); - break; - - default: - BUG(); } /* Print stack trace of this thread. */ stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, @@ -425,24 +408,41 @@ static bool print_report(enum kcsan_value_change value_change, if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) print_verbose_info(current); + /* Print observed value change. 
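The reporting rework around here threads the observed old/new values and the access mask through to the output, which pairs naturally with KCSAN's bit-granular assertions, since those are what set a non-zero mask. A hedged sketch of an annotation whose failure would now produce the "value changed"/"bits changed ... with mask ..." lines (the structure, field and mask are hypothetical):

#include <linux/kcsan-checks.h>
#include <linux/spinlock.h>

#define DEMO_OWNED_MASK		0xffUL	/* bits protected by demo_obj::lock */

struct demo_obj {			/* hypothetical */
	spinlock_t lock;
	unsigned long flags;
};

static void demo_update_owned_bits(struct demo_obj *obj, unsigned long val)
{
	spin_lock(&obj->lock);
	/* Other bits of ->flags may change locklessly; only the owned bits
	 * must not be written concurrently, so restrict the assertion. */
	ASSERT_EXCLUSIVE_BITS(obj->flags, DEMO_OWNED_MASK);
	obj->flags = (obj->flags & ~DEMO_OWNED_MASK) | (val & DEMO_OWNED_MASK);
	spin_unlock(&obj->lock);
}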
*/ + if (ai->size <= 8) { + int hex_len = ai->size * 2; + u64 diff = old ^ new; + + if (mask) + diff &= mask; + if (diff) { + pr_err("\n"); + pr_err("value changed: 0x%0*llx -> 0x%0*llx\n", + hex_len, old, hex_len, new); + if (mask) { + pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n", + hex_len, diff, hex_len, mask); + } + } + } + /* Print report footer. */ pr_err("\n"); pr_err("Reported by Kernel Concurrency Sanitizer on:\n"); dump_stack_print_info(KERN_DEFAULT); pr_err("==================================================================\n"); - return true; + if (panic_on_warn) + panic("panic_on_warn set ...\n"); } static void release_report(unsigned long *flags, struct other_info *other_info) { - if (other_info) - /* - * Use size to denote valid/invalid, since KCSAN entirely - * ignores 0-sized accesses. - */ - other_info->ai.size = 0; - + /* + * Use size to denote valid/invalid, since KCSAN entirely ignores + * 0-sized accesses. + */ + other_info->ai.size = 0; raw_spin_unlock_irqrestore(&report_lock, *flags); } @@ -460,7 +460,7 @@ static void set_other_info_task_blocking(unsigned long *flags, * We may be instrumenting a code-path where current->state is already * something other than TASK_RUNNING. */ - const bool is_running = current->state == TASK_RUNNING; + const bool is_running = task_is_running(current); /* * To avoid deadlock in case we are in an interrupt here and this is a * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a @@ -575,48 +575,42 @@ discard: return false; } -/* - * Depending on the report type either sets @other_info and returns false, or - * awaits @other_info and returns true. If @other_info is not required for the - * report type, simply acquires @report_lock and returns true. - */ -static noinline bool prepare_report(unsigned long *flags, - enum kcsan_report_type type, - const struct access_info *ai, - struct other_info *other_info) +static struct access_info prepare_access_info(const volatile void *ptr, size_t size, + int access_type) { - switch (type) { - case KCSAN_REPORT_CONSUMED_WATCHPOINT: - prepare_report_producer(flags, ai, other_info); - return false; - case KCSAN_REPORT_RACE_SIGNAL: - return prepare_report_consumer(flags, ai, other_info); - default: - /* @other_info not required; just acquire @report_lock. */ - raw_spin_lock_irqsave(&report_lock, *flags); - return true; - } -} - -void kcsan_report(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, - enum kcsan_report_type type, int watchpoint_idx) -{ - unsigned long flags = 0; - const struct access_info ai = { + return (struct access_info) { .ptr = ptr, .size = size, .access_type = access_type, .task_pid = in_task() ? task_pid_nr(current) : -1, .cpu_id = raw_smp_processor_id() }; - struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN - ? NULL : &other_infos[watchpoint_idx]; +} + +void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type, + int watchpoint_idx) +{ + const struct access_info ai = prepare_access_info(ptr, size, access_type); + unsigned long flags; kcsan_disable_current(); - if (WARN_ON(watchpoint_idx < 0 || watchpoint_idx >= ARRAY_SIZE(other_infos))) - goto out; + lockdep_off(); /* See kcsan_report_known_origin(). 
*/ + + prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]); + + lockdep_on(); + kcsan_enable_current(); +} + +void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type, + enum kcsan_value_change value_change, int watchpoint_idx, + u64 old, u64 new, u64 mask) +{ + const struct access_info ai = prepare_access_info(ptr, size, access_type); + struct other_info *other_info = &other_infos[watchpoint_idx]; + unsigned long flags = 0; + kcsan_disable_current(); /* * Because we may generate reports when we're in scheduler code, the use * of printk() could deadlock. Until such time that all printing code @@ -626,22 +620,35 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, */ lockdep_off(); - if (prepare_report(&flags, type, &ai, other_info)) { - /* - * Never report if value_change is FALSE, only if we it is - * either TRUE or MAYBE. In case of MAYBE, further filtering may - * be done once we know the full stack trace in print_report(). - */ - bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE && - print_report(value_change, type, &ai, other_info); + if (!prepare_report_consumer(&flags, &ai, other_info)) + goto out; + /* + * Never report if value_change is FALSE, only when it is + * either TRUE or MAYBE. In case of MAYBE, further filtering may + * be done once we know the full stack trace in print_report(). + */ + if (value_change != KCSAN_VALUE_CHANGE_FALSE) + print_report(value_change, &ai, other_info, old, new, mask); - if (reported && panic_on_warn) - panic("panic_on_warn set ...\n"); + release_report(&flags, other_info); +out: + lockdep_on(); + kcsan_enable_current(); +} - release_report(&flags, other_info); - } +void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type, + u64 old, u64 new, u64 mask) +{ + const struct access_info ai = prepare_access_info(ptr, size, access_type); + unsigned long flags; + + kcsan_disable_current(); + lockdep_off(); /* See kcsan_report_known_origin(). 
*/ + + raw_spin_lock_irqsave(&report_lock, flags); + print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask); + raw_spin_unlock_irqrestore(&report_lock, flags); lockdep_on(); -out: kcsan_enable_current(); } diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index f099baee3578..4b34a9aa32bc 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -26,6 +26,7 @@ #include <linux/suspend.h> #include <linux/device.h> #include <linux/freezer.h> +#include <linux/panic_notifier.h> #include <linux/pm.h> #include <linux/cpu.h> #include <linux/uaccess.h> diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 745f08fdd7a6..471b1d18a92f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -106,7 +106,7 @@ void __weak *alloc_insn_page(void) return module_alloc(PAGE_SIZE); } -void __weak free_insn_page(void *page) +static void free_insn_page(void *page) { module_memfree(page); } @@ -321,11 +321,21 @@ int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, } #ifdef CONFIG_OPTPROBES +void __weak *alloc_optinsn_page(void) +{ + return alloc_insn_page(); +} + +void __weak free_optinsn_page(void *page) +{ + free_insn_page(page); +} + /* For optimized_kprobe buffer */ struct kprobe_insn_cache kprobe_optinsn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), - .alloc = alloc_insn_page, - .free = free_insn_page, + .alloc = alloc_optinsn_page, + .free = free_optinsn_page, .sym = KPROBE_OPTINSN_PAGE_SYM, .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), /* .insn_size is initialized later */ @@ -1183,23 +1193,6 @@ static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, } NOKPROBE_SYMBOL(aggr_post_handler); -static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, - int trapnr) -{ - struct kprobe *cur = __this_cpu_read(kprobe_instance); - - /* - * if we faulted "during" the execution of a user specified - * probe handler, invoke just that probe's fault handler - */ - if (cur && cur->fault_handler) { - if (cur->fault_handler(cur, regs, trapnr)) - return 1; - } - return 0; -} -NOKPROBE_SYMBOL(aggr_fault_handler); - /* Walks the list and increments nmissed count for multiprobe case */ void kprobes_inc_nmissed_count(struct kprobe *p) { @@ -1330,7 +1323,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) ap->addr = p->addr; ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; ap->pre_handler = aggr_pre_handler; - ap->fault_handler = aggr_fault_handler; /* We don't care the kprobe which has gone. */ if (p->post_handler && !kprobe_gone(p)) ap->post_handler = aggr_post_handler; @@ -2014,7 +2006,6 @@ int register_kretprobe(struct kretprobe *rp) rp->kp.pre_handler = pre_handler_kretprobe; rp->kp.post_handler = NULL; - rp->kp.fault_handler = NULL; /* Pre-allocate memory for max kretprobe instances */ if (rp->maxactive <= 0) { diff --git a/kernel/kthread.c b/kernel/kthread.c index fe3f2a40d61e..5b37a8567168 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -68,16 +68,6 @@ enum KTHREAD_BITS { KTHREAD_SHOULD_PARK, }; -static inline void set_kthread_struct(void *kthread) -{ - /* - * We abuse ->set_child_tid to avoid the new member and because it - * can't be wrongly copied by copy_process(). We also rely on fact - * that the caller can't exec, so PF_KTHREAD can't be cleared. 
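The kprobes hunks above remove aggr_fault_handler and stop register_kretprobe() from touching kp.fault_handler, i.e. the per-probe fault callback is going away; a probe only needs its pre/post handlers. A minimal registration sketch (the probed symbol is just an example):

#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name	= "kernel_clone",	/* example target */
	.pre_handler	= demo_pre,
	/* no .fault_handler -- the callback is being retired */
};

static int __init demo_kprobe_init(void)
{
	/* Pair with unregister_kprobe(&demo_kp) on the exit path. */
	return register_kprobe(&demo_kp);
}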
- */ - current->set_child_tid = (__force void __user *)kthread; -} - static inline struct kthread *to_kthread(struct task_struct *k) { WARN_ON(!(k->flags & PF_KTHREAD)); @@ -103,6 +93,22 @@ static inline struct kthread *__to_kthread(struct task_struct *p) return kthread; } +void set_kthread_struct(struct task_struct *p) +{ + struct kthread *kthread; + + if (__to_kthread(p)) + return; + + kthread = kzalloc(sizeof(*kthread), GFP_KERNEL); + /* + * We abuse ->set_child_tid to avoid the new member and because it + * can't be wrongly copied by copy_process(). We also rely on fact + * that the caller can't exec, so PF_KTHREAD can't be cleared. + */ + p->set_child_tid = (__force void __user *)kthread; +} + void free_kthread_struct(struct task_struct *k) { struct kthread *kthread; @@ -272,8 +278,8 @@ static int kthread(void *_create) struct kthread *self; int ret; - self = kzalloc(sizeof(*self), GFP_KERNEL); - set_kthread_struct(self); + set_kthread_struct(current); + self = to_kthread(current); /* If user was SIGKILLed, I release the structure. */ done = xchg(&create->done, NULL); @@ -451,7 +457,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), } EXPORT_SYMBOL(kthread_create_on_node); -static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state) +static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state) { unsigned long flags; @@ -467,7 +473,7 @@ static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mas raw_spin_unlock_irqrestore(&p->pi_lock, flags); } -static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state) +static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state) { __kthread_bind_mask(p, cpumask_of(cpu), state); } @@ -1093,8 +1099,38 @@ void kthread_flush_work(struct kthread_work *work) EXPORT_SYMBOL_GPL(kthread_flush_work); /* - * This function removes the work from the worker queue. Also it makes sure - * that it won't get queued later via the delayed work's timer. + * Make sure that the timer is neither set nor running and could + * not manipulate the work list_head any longer. + * + * The function is called under worker->lock. The lock is temporary + * released but the timer can't be set again in the meantime. + */ +static void kthread_cancel_delayed_work_timer(struct kthread_work *work, + unsigned long *flags) +{ + struct kthread_delayed_work *dwork = + container_of(work, struct kthread_delayed_work, work); + struct kthread_worker *worker = work->worker; + + /* + * del_timer_sync() must be called to make sure that the timer + * callback is not running. The lock must be temporary released + * to avoid a deadlock with the callback. In the meantime, + * any queuing is blocked by setting the canceling counter. + */ + work->canceling++; + raw_spin_unlock_irqrestore(&worker->lock, *flags); + del_timer_sync(&dwork->timer); + raw_spin_lock_irqsave(&worker->lock, *flags); + work->canceling--; +} + +/* + * This function removes the work from the worker queue. + * + * It is called under worker->lock. The caller must make sure that + * the timer used by delayed work is not running, e.g. by calling + * kthread_cancel_delayed_work_timer(). * * The work might still be in use when this function finishes. See the * current_work proceed by the worker. 
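The struct kthread that set_kthread_struct() now allocates is what backs the ordinary kthread API; to_kthread(current) becomes valid as soon as the thread enters kthread(). A minimal usage sketch of that API follows; the thread function and names are illustrative only.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/delay.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	/* to_kthread(current) is valid here: set_kthread_struct() ran in kthread(). */
	while (!kthread_should_stop()) {
		/* ... periodic work ... */
		msleep(100);
	}
	return 0;
}

static int start_worker(void)
{
	worker = kthread_run(worker_fn, NULL, "example-worker");
	return PTR_ERR_OR_ZERO(worker);
}

static void stop_worker(void)
{
	if (!IS_ERR_OR_NULL(worker))
		kthread_stop(worker);
}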
@@ -1102,28 +1138,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work); * Return: %true if @work was pending and successfully canceled, * %false if @work was not pending */ -static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, - unsigned long *flags) +static bool __kthread_cancel_work(struct kthread_work *work) { - /* Try to cancel the timer if exists. */ - if (is_dwork) { - struct kthread_delayed_work *dwork = - container_of(work, struct kthread_delayed_work, work); - struct kthread_worker *worker = work->worker; - - /* - * del_timer_sync() must be called to make sure that the timer - * callback is not running. The lock must be temporary released - * to avoid a deadlock with the callback. In the meantime, - * any queuing is blocked by setting the canceling counter. - */ - work->canceling++; - raw_spin_unlock_irqrestore(&worker->lock, *flags); - del_timer_sync(&dwork->timer); - raw_spin_lock_irqsave(&worker->lock, *flags); - work->canceling--; - } - /* * Try to remove the work from a worker list. It might either * be from worker->work_list or from worker->delayed_work_list. @@ -1146,14 +1162,14 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, * modify @dwork's timer so that it expires after @delay. If @delay is zero, * @work is guaranteed to be queued immediately. * - * Return: %true if @dwork was pending and its timer was modified, - * %false otherwise. + * Return: %false if @dwork was idle and queued, %true otherwise. * * A special case is when the work is being canceled in parallel. * It might be caused either by the real kthread_cancel_delayed_work_sync() * or yet another kthread_mod_delayed_work() call. We let the other command - * win and return %false here. The caller is supposed to synchronize these - * operations a reasonable way. + * win and return %true here. The return value can be used for reference + * counting and the number of queued works stays the same. Anyway, the caller + * is supposed to synchronize these operations a reasonable way. * * This function is safe to call from any context including IRQ handler. * See __kthread_cancel_work() and kthread_delayed_work_timer_fn() @@ -1165,22 +1181,39 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, { struct kthread_work *work = &dwork->work; unsigned long flags; - int ret = false; + int ret; raw_spin_lock_irqsave(&worker->lock, flags); /* Do not bother with canceling when never queued. */ - if (!work->worker) + if (!work->worker) { + ret = false; goto fast_queue; + } /* Work must not be used with >1 worker, see kthread_queue_work() */ WARN_ON_ONCE(work->worker != worker); - /* Do not fight with another command that is canceling this work. */ - if (work->canceling) + /* + * Temporary cancel the work but do not fight with another command + * that is canceling the work as well. + * + * It is a bit tricky because of possible races with another + * mod_delayed_work() and cancel_delayed_work() callers. + * + * The timer must be canceled first because worker->lock is released + * when doing so. But the work can be removed from the queue (list) + * only when it can be queued again so that the return value can + * be used for reference counting. + */ + kthread_cancel_delayed_work_timer(work, &flags); + if (work->canceling) { + /* The number of works in the queue does not change. 
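The reworked kthread_mod_delayed_work() documented above returns false only when the work was idle and has just been queued, so its return value can drive reference counting: the number of queued instances never changes when it returns true. An illustrative sketch of that pattern; the object, release function, and helper names are invented for the example, and worker/dwork setup is omitted.

#include <linux/kthread.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref			ref;
	struct kthread_worker		*worker;
	struct kthread_delayed_work	dwork;	/* its function drops one ref when it runs */
};

static void my_obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

/* Hold one reference per queued instance of the delayed work. */
static void my_obj_kick(struct my_obj *obj, unsigned long delay)
{
	kref_get(&obj->ref);
	if (kthread_mod_delayed_work(obj->worker, &obj->dwork, delay)) {
		/*
		 * true: the work was already queued or is being canceled,
		 * so the number of queued instances did not grow; drop the
		 * reference taken above.
		 */
		kref_put(&obj->ref, my_obj_release);
	}
}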
*/ + ret = true; goto out; + } + ret = __kthread_cancel_work(work); - ret = __kthread_cancel_work(work, true, &flags); fast_queue: __kthread_queue_delayed_work(worker, dwork, delay); out: @@ -1202,7 +1235,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) /* Work must not be used with >1 worker, see kthread_queue_work(). */ WARN_ON_ONCE(work->worker != worker); - ret = __kthread_cancel_work(work, is_dwork, &flags); + if (is_dwork) + kthread_cancel_delayed_work_timer(work, &flags); + + ret = __kthread_cancel_work(work); if (worker->current_work != work) goto out_fast; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 7641bd407239..bf1c00c881e4 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -760,7 +760,7 @@ static void lockdep_print_held_locks(struct task_struct *p) * It's not reliable to print a task's held locks if it's not sleeping * and it's not the current task. */ - if (p->state == TASK_RUNNING && p != current) + if (p != current && task_is_running(p)) return; for (i = 0; i < depth; i++) { printk(" #%d: ", i); @@ -843,7 +843,7 @@ static int count_matching_names(struct lock_class *new_class) } /* used from NMI context -- must be lockless */ -static __always_inline struct lock_class * +static noinstr struct lock_class * look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass) { struct lockdep_subclass_key *key; @@ -851,12 +851,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass) struct lock_class *class; if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { + instrumentation_begin(); debug_locks_off(); printk(KERN_ERR "BUG: looking up invalid subclass: %u\n", subclass); printk(KERN_ERR "turning off the locking correctness validator.\n"); dump_stack(); + instrumentation_end(); return NULL; } @@ -2304,7 +2306,56 @@ static void print_lock_class_header(struct lock_class *class, int depth) } /* - * printk the shortest lock dependencies from @start to @end in reverse order: + * Dependency path printing: + * + * After BFS we get a lock dependency path (linked via ->parent of lock_list), + * printing out each lock in the dependency path will help on understanding how + * the deadlock could happen. Here are some details about dependency path + * printing: + * + * 1) A lock_list can be either forwards or backwards for a lock dependency, + * for a lock dependency A -> B, there are two lock_lists: + * + * a) lock_list in the ->locks_after list of A, whose ->class is B and + * ->links_to is A. In this case, we can say the lock_list is + * "A -> B" (forwards case). + * + * b) lock_list in the ->locks_before list of B, whose ->class is A + * and ->links_to is B. In this case, we can say the lock_list is + * "B <- A" (bacwards case). + * + * The ->trace of both a) and b) point to the call trace where B was + * acquired with A held. + * + * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't + * represent a certain lock dependency, it only provides an initial entry + * for BFS. For example, BFS may introduce a "helper" lock_list whose + * ->class is A, as a result BFS will search all dependencies starting with + * A, e.g. A -> B or A -> C. + * + * The notation of a forwards helper lock_list is like "-> A", which means + * we should search the forwards dependencies starting with "A", e.g A -> B + * or A -> C. + * + * The notation of a bacwards helper lock_list is like "<- B", which means + * we should search the backwards dependencies ending with "B", e.g. 
+ * B <- A or B <- C. + */ + +/* + * printk the shortest lock dependencies from @root to @leaf in reverse order. + * + * We have a lock dependency path as follow: + * + * @root @leaf + * | | + * V V + * ->parent ->parent + * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list | + * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln| + * + * , so it's natural that we start from @leaf and print every ->class and + * ->trace until we reach the @root. */ static void __used print_shortest_lock_dependencies(struct lock_list *leaf, @@ -2332,6 +2383,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf, } while (entry && (depth >= 0)); } +/* + * printk the shortest lock dependencies from @leaf to @root. + * + * We have a lock dependency path (from a backwards search) as follow: + * + * @leaf @root + * | | + * V V + * ->parent ->parent + * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list | + * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln | + * + * , so when we iterate from @leaf to @root, we actually print the lock + * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order. + * + * Another thing to notice here is that ->class of L2 <- L1 is L1, while the + * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call + * trace of L1 in the dependency path, which is alright, because most of the + * time we can figure out where L1 is held from the call trace of L2. + */ +static void __used +print_shortest_lock_dependencies_backwards(struct lock_list *leaf, + struct lock_list *root) +{ + struct lock_list *entry = leaf; + const struct lock_trace *trace = NULL; + int depth; + + /*compute depth from generated tree by BFS*/ + depth = get_lock_depth(leaf); + + do { + print_lock_class_header(entry->class, depth); + if (trace) { + printk("%*s ... acquired at:\n", depth, ""); + print_lock_trace(trace, 2); + printk("\n"); + } + + /* + * Record the pointer to the trace for the next lock_list + * entry, see the comments for the function. + */ + trace = entry->trace; + + if (depth == 0 && (entry != root)) { + printk("lockdep:%s bad path found in chain graph\n", __func__); + break; + } + + entry = get_lock_parent(entry); + depth--; + } while (entry && (depth >= 0)); +} + static void print_irq_lock_scenario(struct lock_list *safe_entry, struct lock_list *unsafe_entry, @@ -2446,10 +2552,7 @@ print_bad_irq_dependency(struct task_struct *curr, lockdep_print_held_locks(curr); pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass); - prev_root->trace = save_trace(); - if (!prev_root->trace) - return; - print_shortest_lock_dependencies(backwards_entry, prev_root); + print_shortest_lock_dependencies_backwards(backwards_entry, prev_root); pr_warn("\nthe dependencies between the lock to be acquired"); pr_warn(" and %s-irq-unsafe lock:\n", irqclass); @@ -2667,8 +2770,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, * Step 3: we found a bad match! Now retrieve a lock from the backward * list whose usage mask matches the exclusive usage mask from the * lock found on the forward list. + * + * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering + * the follow case: + * + * When trying to add A -> B to the graph, we find that there is a + * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M, + * that B -> ... -> M. 
However M is **softirq-safe**, if we use exact + * invert bits of M's usage_mask, we will find another lock N that is + * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not + * cause a inversion deadlock. */ - backward_mask = original_mask(target_entry1->class->usage_mask); + backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL); ret = find_usage_backwards(&this, backward_mask, &target_entry); if (bfs_error(ret)) { @@ -2718,7 +2831,7 @@ static inline bool usage_skip(struct lock_list *entry, void *mask) * <target> or not. If it can, <src> -> <target> dependency is already * in the graph. * - * Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if + * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if * any error appears in the bfs search. */ static noinline enum bfs_result @@ -4577,7 +4690,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next) u8 curr_inner; int depth; - if (!curr->lockdep_depth || !next_inner || next->trylock) + if (!next_inner || next->trylock) return 0; if (!next_outer) @@ -6393,6 +6506,7 @@ asmlinkage __visible void lockdep_sys_exit(void) void lockdep_rcu_suspicious(const char *file, const int line, const char *s) { struct task_struct *curr = current; + int dl = READ_ONCE(debug_locks); /* Note: the following can be executed concurrently, so be careful. */ pr_warn("\n"); @@ -6402,11 +6516,12 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s) pr_warn("-----------------------------\n"); pr_warn("%s:%d %s!\n", file, line, s); pr_warn("\nother info that might help us debug this:\n\n"); - pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n", + pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s", !rcu_lockdep_current_cpu_online() ? "RCU used illegally from offline CPU!\n" : "", - rcu_scheduler_active, debug_locks); + rcu_scheduler_active, dl, + dl ? 
"" : "Possible false positive due to lockdep disabling via debug_locks = 0\n"); /* * If a CPU is in the RCU-free window in idle (ie: in the section diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 013e1b08a1bf..d2df5e68b503 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -923,7 +923,7 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter, * Lock a mutex (possibly interruptible), slowpath: */ static __always_inline int __sched -__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, +__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) { @@ -1098,14 +1098,14 @@ err_early_kill: } static int __sched -__mutex_lock(struct mutex *lock, long state, unsigned int subclass, +__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip) { return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); } static int __sched -__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass, +__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx) { diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 406818196a9f..b5d9bb5202c6 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1135,7 +1135,7 @@ void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) * * Must be called with lock->wait_lock held and interrupts disabled */ -static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, +static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter) { @@ -1190,7 +1190,7 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, /* * Slow path lock function: */ -static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, +static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state, struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk) { diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 809b0016d344..16bfbb10c74d 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -889,7 +889,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem) * Wait for the read lock to be granted */ static struct rw_semaphore __sched * -rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state) +rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state) { long adjustment = -RWSEM_READER_BIAS; long rcnt = (count >> RWSEM_READER_SHIFT); diff --git a/kernel/module.c b/kernel/module.c index 7e78dfabca97..927d46cb8eb9 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void) #endif } +#ifdef CONFIG_MODULE_SIG static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); module_param(sig_enforce, bool_enable_only, 0644); +void set_module_sig_enforced(void) +{ + sig_enforce = true; +} +#else +#define sig_enforce false +#endif + /* * Export sig_enforce kernel cmdline parameter to allow other subsystems rely * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. 
@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void) } EXPORT_SYMBOL(is_module_sig_enforced); -void set_module_sig_enforced(void) -{ - sig_enforce = true; -} - /* Block module loading/unloading? */ int modules_disabled = 0; core_param(nomodule, modules_disabled, bint, 0); diff --git a/kernel/panic.c b/kernel/panic.c index 332736a72a58..edad89660a2b 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -23,6 +23,7 @@ #include <linux/reboot.h> #include <linux/delay.h> #include <linux/kexec.h> +#include <linux/panic_notifier.h> #include <linux/sched.h> #include <linux/sysrq.h> #include <linux/init.h> diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 6bfe3ead10ad..a12779650f15 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -98,20 +98,20 @@ config PM_STD_PARTITION default "" help The default resume partition is the partition that the suspend- - to-disk implementation will look for a suspended disk image. + to-disk implementation will look for a suspended disk image. - The partition specified here will be different for almost every user. + The partition specified here will be different for almost every user. It should be a valid swap partition (at least for now) that is turned - on before suspending. + on before suspending. The partition specified can be overridden by specifying: - resume=/dev/<other device> + resume=/dev/<other device> - which will set the resume partition to the device specified. + which will set the resume partition to the device specified. Note there is currently not a way to specify which device to save the - suspended image to. It will simply pick the first available swap + suspended image to. It will simply pick the first available swap device. config PM_SLEEP diff --git a/kernel/power/process.c b/kernel/power/process.c index 50cc63534486..37401c99b7d7 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/power/process.c - Functions for starting/stopping processes on + * drivers/power/process.c - Functions for starting/stopping processes on * suspend transitions. * * Originally from swsusp. diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 1a221dcb3c01..f7a986078213 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -331,7 +331,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size) * * Memory bitmap is a structure consisting of many linked lists of * objects. The main list's elements are of type struct zone_bitmap - * and each of them corresonds to one zone. For each zone bitmap + * and each of them corresponds to one zone. For each zone bitmap * object there is a list of objects of type struct bm_block that * represent each blocks of bitmap in which information is stored. * @@ -1146,7 +1146,7 @@ int create_basic_memory_bitmaps(void) Free_second_object: kfree(bm2); Free_first_bitmap: - memory_bm_free(bm1, PG_UNSAFE_CLEAR); + memory_bm_free(bm1, PG_UNSAFE_CLEAR); Free_first_object: kfree(bm1); return -ENOMEM; @@ -1500,7 +1500,7 @@ static struct memory_bitmap copy_bm; /** * swsusp_free - Free pages allocated for hibernation image. * - * Image pages are alocated before snapshot creation, so they need to be + * Image pages are allocated before snapshot creation, so they need to be * released after resume. */ void swsusp_free(void) @@ -2326,7 +2326,7 @@ static struct memory_bitmap *safe_highmem_bm; * (@nr_highmem_p points to the variable containing the number of highmem image * pages). 
The pages that are "safe" (ie. will not be overwritten when the * hibernation image is restored entirely) have the corresponding bits set in - * @bm (it must be unitialized). + * @bm (it must be uninitialized). * * NOTE: This function should not be called if there are no highmem image pages. */ @@ -2483,7 +2483,7 @@ static inline void free_highmem_data(void) {} /** * prepare_image - Make room for loading hibernation image. - * @new_bm: Unitialized memory bitmap structure. + * @new_bm: Uninitialized memory bitmap structure. * @bm: Memory bitmap with unsafe pages marked. * * Use @bm to mark the pages that will be overwritten in the process of diff --git a/kernel/power/swap.c b/kernel/power/swap.c index bea3cb8afa11..3cb89baebc79 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -1125,7 +1125,7 @@ struct dec_data { }; /** - * Deompression function that runs in its own thread. + * Decompression function that runs in its own thread. */ static int lzo_decompress_threadfn(void *data) { diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 421c35571797..142a58d124d9 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter) EXPORT_SYMBOL_GPL(kmsg_dump_rewind); #endif + +#ifdef CONFIG_SMP +static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1); +static atomic_t printk_cpulock_nested = ATOMIC_INIT(0); + +/** + * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant + * spinning lock is not owned by any CPU. + * + * Context: Any context. + */ +void __printk_wait_on_cpu_lock(void) +{ + do { + cpu_relax(); + } while (atomic_read(&printk_cpulock_owner) != -1); +} +EXPORT_SYMBOL(__printk_wait_on_cpu_lock); + +/** + * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant + * spinning lock. + * + * If no processor has the lock, the calling processor takes the lock and + * becomes the owner. If the calling processor is already the owner of the + * lock, this function succeeds immediately. + * + * Context: Any context. Expects interrupts to be disabled. + * Return: 1 on success, otherwise 0. + */ +int __printk_cpu_trylock(void) +{ + int cpu; + int old; + + cpu = smp_processor_id(); + + /* + * Guarantee loads and stores from this CPU when it is the lock owner + * are _not_ visible to the previous lock owner. This pairs with + * __printk_cpu_unlock:B. + * + * Memory barrier involvement: + * + * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then + * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B. + * + * Relies on: + * + * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B + * of the previous CPU + * matching + * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B + * of this CPU + */ + old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1, + cpu); /* LMM(__printk_cpu_trylock:A) */ + if (old == -1) { + /* + * This CPU is now the owner and begins loading/storing + * data: LMM(__printk_cpu_trylock:B) + */ + return 1; + + } else if (old == cpu) { + /* This CPU is already the owner. */ + atomic_inc(&printk_cpulock_nested); + return 1; + } + + return 0; +} +EXPORT_SYMBOL(__printk_cpu_trylock); + +/** + * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock. + * + * The calling processor must be the owner of the lock. + * + * Context: Any context. Expects interrupts to be disabled. 
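The cpu-reentrant printk lock added in this hunk is intended for code that emits many related lines (backtraces, register dumps) from any context, including NMI, and must not interleave with another CPU doing the same. An illustrative sketch of the expected calling pattern, with interrupts disabled around the critical section as the kerneldoc requires; the dumping function itself is made up.

#include <linux/printk.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

static void dump_something_atomically(void)
{
	unsigned long flags;

	for (;;) {
		local_irq_save(flags);
		if (__printk_cpu_trylock())
			break;
		local_irq_restore(flags);
		__printk_wait_on_cpu_lock();	/* spin until the owner releases it */
	}

	/* Owner (possibly nested): output will not interleave with other CPUs. */
	pr_emerg("CPU%d: begin dump\n", raw_smp_processor_id());
	/* ... dump_stack(), register dumps, etc. ... */

	__printk_cpu_unlock();
	local_irq_restore(flags);
}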
+ */ +void __printk_cpu_unlock(void) +{ + if (atomic_read(&printk_cpulock_nested)) { + atomic_dec(&printk_cpulock_nested); + return; + } + + /* + * This CPU is finished loading/storing data: + * LMM(__printk_cpu_unlock:A) + */ + + /* + * Guarantee loads and stores from this CPU when it was the + * lock owner are visible to the next lock owner. This pairs + * with __printk_cpu_trylock:A. + * + * Memory barrier involvement: + * + * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, + * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A. + * + * Relies on: + * + * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B + * of this CPU + * matching + * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B + * of the next CPU + */ + atomic_set_release(&printk_cpulock_owner, + -1); /* LMM(__printk_cpu_unlock:B) */ +} +EXPORT_SYMBOL(__printk_cpu_unlock); +#endif /* CONFIG_SMP */ diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index 7a1414622051..94232186fccb 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -391,6 +391,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) /* No obstacles. */ return vprintk_default(fmt, args); } +EXPORT_SYMBOL(vprintk); void __init printk_safe_init(void) { @@ -411,4 +412,3 @@ void __init printk_safe_init(void) /* Flush pending messages that did not have scheduled IRQ works. */ printk_safe_flush(); } -EXPORT_SYMBOL(vprintk); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 2997ca600d18..f8589bf8d7dc 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -197,7 +197,7 @@ static bool ptrace_freeze_traced(struct task_struct *task) spin_lock_irq(&task->sighand->siglock); if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && !__fatal_signal_pending(task)) { - task->state = __TASK_TRACED; + WRITE_ONCE(task->__state, __TASK_TRACED); ret = true; } spin_unlock_irq(&task->sighand->siglock); @@ -207,7 +207,7 @@ static bool ptrace_freeze_traced(struct task_struct *task) static void ptrace_unfreeze_traced(struct task_struct *task) { - if (task->state != __TASK_TRACED) + if (READ_ONCE(task->__state) != __TASK_TRACED) return; WARN_ON(!task->ptrace || task->parent != current); @@ -217,11 +217,11 @@ static void ptrace_unfreeze_traced(struct task_struct *task) * Recheck state under the lock to close this race. */ spin_lock_irq(&task->sighand->siglock); - if (task->state == __TASK_TRACED) { + if (READ_ONCE(task->__state) == __TASK_TRACED) { if (__fatal_signal_pending(task)) wake_up_state(task, __TASK_TRACED); else - task->state = TASK_TRACED; + WRITE_ONCE(task->__state, TASK_TRACED); } spin_unlock_irq(&task->sighand->siglock); } @@ -256,7 +256,7 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) */ read_lock(&tasklist_lock); if (child->ptrace && child->parent == current) { - WARN_ON(child->state == __TASK_TRACED); + WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED); /* * child->sighand can't be NULL, release_task() * does ptrace_unlink() before __exit_signal(). @@ -273,7 +273,7 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) * ptrace_stop() changes ->state back to TASK_RUNNING, * so we should not worry about leaking __TASK_TRACED. 
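The ptrace hunks below replace plain loads and stores of the renamed task state word with READ_ONCE()/WRITE_ONCE(), because the field is inspected locklessly from other CPUs. A generic sketch of that annotation pattern for any field with lockless readers; the structure is invented and its lock initialization is omitted.

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct widget {
	spinlock_t	lock;
	unsigned int	state;	/* written under ->lock, read locklessly */
};

static void widget_set_state(struct widget *w, unsigned int new_state)
{
	spin_lock(&w->lock);
	WRITE_ONCE(w->state, new_state);	/* pairs with the lockless READ_ONCE() */
	spin_unlock(&w->lock);
}

static bool widget_is_ready(struct widget *w)
{
	/* Tolerates racing with widget_set_state(): single, untorn load. */
	return READ_ONCE(w->state) == 1;
}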
*/ - WARN_ON(child->state == __TASK_TRACED); + WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED); ret = -ESRCH; } } diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug index 1942c1f1bb65..4fd64999300f 100644 --- a/kernel/rcu/Kconfig.debug +++ b/kernel/rcu/Kconfig.debug @@ -116,7 +116,7 @@ config RCU_EQS_DEBUG config RCU_STRICT_GRACE_PERIOD bool "Provide debug RCU implementation with short grace periods" - depends on DEBUG_KERNEL && RCU_EXPERT + depends on DEBUG_KERNEL && RCU_EXPERT && NR_CPUS <= 4 default n select PREEMPT_COUNT if PREEMPT=n help diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index bf0827d4b659..24b5f2c2de87 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -308,6 +308,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) } } +extern void rcu_init_geometry(void); + /* Returns a pointer to the first leaf rcu_node structure. */ #define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1]) @@ -422,12 +424,6 @@ do { \ #endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */ -#ifdef CONFIG_SRCU -void srcu_init(void); -#else /* #ifdef CONFIG_SRCU */ -static inline void srcu_init(void) { } -#endif /* #else #ifdef CONFIG_SRCU */ - #ifdef CONFIG_TINY_RCU /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ static inline bool rcu_gp_is_normal(void) { return true; } @@ -441,7 +437,11 @@ bool rcu_gp_is_expedited(void); /* Internal RCU use. */ void rcu_expedite_gp(void); void rcu_unexpedite_gp(void); void rcupdate_announce_bootup_oddness(void); +#ifdef CONFIG_TASKS_RCU_GENERIC void show_rcu_tasks_gp_kthreads(void); +#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ +static inline void show_rcu_tasks_gp_kthreads(void) {} +#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ void rcu_request_urgent_qs_task(struct task_struct *t); #endif /* #else #ifdef CONFIG_TINY_RCU */ @@ -519,6 +519,7 @@ static inline unsigned long rcu_exp_batches_completed(void) { return 0; } static inline unsigned long srcu_batches_completed(struct srcu_struct *sp) { return 0; } static inline void rcu_force_quiescent_state(void) { } +static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; } static inline void show_rcu_gp_kthreads(void) { } static inline int rcu_get_gp_kthreads_prio(void) { return 0; } static inline void rcu_fwd_progress_check(unsigned long j) { } @@ -527,6 +528,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp); unsigned long rcu_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); unsigned long srcu_batches_completed(struct srcu_struct *sp); +bool rcu_check_boost_fail(unsigned long gp_state, int *cpup); void show_rcu_gp_kthreads(void); int rcu_get_gp_kthreads_prio(void); void rcu_fwd_progress_check(unsigned long j); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 29d2f4c647d3..40ef5417d954 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -245,12 +245,6 @@ static const char *rcu_torture_writer_state_getname(void) return rcu_torture_writer_state_names[i]; } -#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_PREEMPT_RT) -# define rcu_can_boost() 1 -#else -# define rcu_can_boost() 0 -#endif - #ifdef CONFIG_RCU_TRACE static u64 notrace rcu_trace_clock_local(void) { @@ -331,6 +325,7 @@ struct rcu_torture_ops { void (*read_delay)(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp); void (*readunlock)(int idx); + int (*readlock_held)(void); unsigned long (*get_gp_seq)(void); unsigned long (*gp_diff)(unsigned long new, unsigned 
long old); void (*deferred_free)(struct rcu_torture *p); @@ -345,6 +340,7 @@ struct rcu_torture_ops { void (*fqs)(void); void (*stats)(void); void (*gp_kthread_dbg)(void); + bool (*check_boost_failed)(unsigned long gp_state, int *cpup); int (*stall_dur)(void); int irq_capable; int can_boost; @@ -359,6 +355,11 @@ static struct rcu_torture_ops *cur_ops; * Definitions for rcu torture testing. */ +static int torture_readlock_not_held(void) +{ + return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); +} + static int rcu_torture_read_lock(void) __acquires(RCU) { rcu_read_lock(); @@ -483,30 +484,32 @@ static void rcu_sync_torture_init(void) } static struct rcu_torture_ops rcu_ops = { - .ttype = RCU_FLAVOR, - .init = rcu_sync_torture_init, - .readlock = rcu_torture_read_lock, - .read_delay = rcu_read_delay, - .readunlock = rcu_torture_read_unlock, - .get_gp_seq = rcu_get_gp_seq, - .gp_diff = rcu_seq_diff, - .deferred_free = rcu_torture_deferred_free, - .sync = synchronize_rcu, - .exp_sync = synchronize_rcu_expedited, - .get_gp_state = get_state_synchronize_rcu, - .start_gp_poll = start_poll_synchronize_rcu, - .poll_gp_state = poll_state_synchronize_rcu, - .cond_sync = cond_synchronize_rcu, - .call = call_rcu, - .cb_barrier = rcu_barrier, - .fqs = rcu_force_quiescent_state, - .stats = NULL, - .gp_kthread_dbg = show_rcu_gp_kthreads, - .stall_dur = rcu_jiffies_till_stall_check, - .irq_capable = 1, - .can_boost = rcu_can_boost(), - .extendables = RCUTORTURE_MAX_EXTEND, - .name = "rcu" + .ttype = RCU_FLAVOR, + .init = rcu_sync_torture_init, + .readlock = rcu_torture_read_lock, + .read_delay = rcu_read_delay, + .readunlock = rcu_torture_read_unlock, + .readlock_held = torture_readlock_not_held, + .get_gp_seq = rcu_get_gp_seq, + .gp_diff = rcu_seq_diff, + .deferred_free = rcu_torture_deferred_free, + .sync = synchronize_rcu, + .exp_sync = synchronize_rcu_expedited, + .get_gp_state = get_state_synchronize_rcu, + .start_gp_poll = start_poll_synchronize_rcu, + .poll_gp_state = poll_state_synchronize_rcu, + .cond_sync = cond_synchronize_rcu, + .call = call_rcu, + .cb_barrier = rcu_barrier, + .fqs = rcu_force_quiescent_state, + .stats = NULL, + .gp_kthread_dbg = show_rcu_gp_kthreads, + .check_boost_failed = rcu_check_boost_fail, + .stall_dur = rcu_jiffies_till_stall_check, + .irq_capable = 1, + .can_boost = IS_ENABLED(CONFIG_RCU_BOOST), + .extendables = RCUTORTURE_MAX_EXTEND, + .name = "rcu" }; /* @@ -540,6 +543,7 @@ static struct rcu_torture_ops rcu_busted_ops = { .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ .readunlock = rcu_torture_read_unlock, + .readlock_held = torture_readlock_not_held, .get_gp_seq = rcu_no_completed, .deferred_free = rcu_busted_torture_deferred_free, .sync = synchronize_rcu_busted, @@ -589,6 +593,11 @@ static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) srcu_read_unlock(srcu_ctlp, idx); } +static int torture_srcu_read_lock_held(void) +{ + return srcu_read_lock_held(srcu_ctlp); +} + static unsigned long srcu_torture_completed(void) { return srcu_batches_completed(srcu_ctlp); @@ -646,6 +655,7 @@ static struct rcu_torture_ops srcu_ops = { .readlock = srcu_torture_read_lock, .read_delay = srcu_read_delay, .readunlock = srcu_torture_read_unlock, + .readlock_held = torture_srcu_read_lock_held, .get_gp_seq = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, @@ -681,6 +691,7 @@ static struct rcu_torture_ops srcud_ops = { .readlock = srcu_torture_read_lock, .read_delay = srcu_read_delay, .readunlock = srcu_torture_read_unlock, + .readlock_held = torture_srcu_read_lock_held, .get_gp_seq = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, @@ -700,6 +711,7 @@ static struct rcu_torture_ops busted_srcud_ops = { .readlock = srcu_torture_read_lock, .read_delay = rcu_read_delay, .readunlock = srcu_torture_read_unlock, + .readlock_held = torture_srcu_read_lock_held, .get_gp_seq = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, @@ -787,6 +799,7 @@ static struct rcu_torture_ops trivial_ops = { .readlock = rcu_torture_read_lock_trivial, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = rcu_torture_read_unlock_trivial, + .readlock_held = torture_readlock_not_held, .get_gp_seq = rcu_no_completed, .sync = synchronize_rcu_trivial, .exp_sync = synchronize_rcu_trivial, @@ -850,6 +863,7 @@ static struct rcu_torture_ops tasks_tracing_ops = { .readlock = tasks_tracing_torture_read_lock, .read_delay = srcu_read_delay, /* just reuse srcu's version. */ .readunlock = tasks_tracing_torture_read_unlock, + .readlock_held = rcu_read_lock_trace_held, .get_gp_seq = rcu_no_completed, .deferred_free = rcu_tasks_tracing_torture_deferred_free, .sync = synchronize_rcu_tasks_trace, @@ -871,32 +885,13 @@ static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) return cur_ops->gp_diff(new, old); } -static bool __maybe_unused torturing_tasks(void) -{ - return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops; -} - /* * RCU torture priority-boost testing. Runs one real-time thread per - * CPU for moderate bursts, repeatedly registering RCU callbacks and - * spinning waiting for them to be invoked. If a given callback takes - * too long to be invoked, we assume that priority inversion has occurred. + * CPU for moderate bursts, repeatedly starting grace periods and waiting + * for them to complete. If a given grace period takes too long, we assume + * that priority inversion has occurred. 
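The rewritten boost test described above drops the callback-in-flight scheme in favor of the polled grace-period interface: start a grace period, then poll for its completion. A minimal sketch of that interface outside rcutorture; the wrapper names are illustrative and error handling is omitted.

#include <linux/rcupdate.h>

static unsigned long example_gp_cookie;

static void example_start_gp(void)
{
	/* Start (or note) a grace period without blocking or posting a callback. */
	example_gp_cookie = start_poll_synchronize_rcu();
}

static bool example_gp_done(void)
{
	/* True once a full grace period has elapsed since the cookie was taken. */
	return poll_state_synchronize_rcu(example_gp_cookie);
}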
*/ -struct rcu_boost_inflight { - struct rcu_head rcu; - int inflight; -}; - -static void rcu_torture_boost_cb(struct rcu_head *head) -{ - struct rcu_boost_inflight *rbip = - container_of(head, struct rcu_boost_inflight, rcu); - - /* Ensure RCU-core accesses precede clearing ->inflight */ - smp_store_release(&rbip->inflight, 0); -} - static int old_rt_runtime = -1; static void rcu_torture_disable_rt_throttle(void) @@ -923,49 +918,68 @@ static void rcu_torture_enable_rt_throttle(void) old_rt_runtime = -1; } -static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) +static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) { + int cpu; static int dbg_done; - - if (end - start > test_boost_duration * HZ - HZ / 2) { + unsigned long end = jiffies; + bool gp_done; + unsigned long j; + static unsigned long last_persist; + unsigned long lp; + unsigned long mininterval = test_boost_duration * HZ - HZ / 2; + + if (end - *start > mininterval) { + // Recheck after checking time to avoid false positives. + smp_mb(); // Time check before grace-period check. + if (cur_ops->poll_gp_state(gp_state)) + return false; // passed, though perhaps just barely + if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { + // At most one persisted message per boost test. + j = jiffies; + lp = READ_ONCE(last_persist); + if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) + pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); + return false; // passed on a technicality + } VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); n_rcu_torture_boost_failure++; - if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) + if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { + pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", + current->rt_priority, gp_state, end - *start); cur_ops->gp_kthread_dbg(); + // Recheck after print to flag grace period ending during splat. + gp_done = cur_ops->poll_gp_state(gp_state); + pr_info("Boost inversion: GP %lu %s.\n", gp_state, + gp_done ? "ended already" : "still pending"); - return true; /* failed */ + } + + return true; // failed + } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { + *start = jiffies; } - return false; /* passed */ + return false; // passed } static int rcu_torture_boost(void *arg) { - unsigned long call_rcu_time; unsigned long endtime; + unsigned long gp_state; + unsigned long gp_state_time; unsigned long oldstarttime; - struct rcu_boost_inflight rbi = { .inflight = 0 }; VERBOSE_TOROUT_STRING("rcu_torture_boost started"); /* Set real-time priority. */ sched_set_fifo_low(current); - init_rcu_head_on_stack(&rbi.rcu); /* Each pass through the following loop does one boost-test cycle. */ do { bool failed = false; // Test failed already in this test interval - bool firsttime = true; + bool gp_initiated = false; - /* Increment n_rcu_torture_boosts once per boost-test */ - while (!kthread_should_stop()) { - if (mutex_trylock(&boost_mutex)) { - n_rcu_torture_boosts++; - mutex_unlock(&boost_mutex); - break; - } - schedule_timeout_uninterruptible(1); - } if (kthread_should_stop()) goto checkwait; @@ -979,33 +993,33 @@ static int rcu_torture_boost(void *arg) goto checkwait; } - /* Do one boost-test interval. */ + // Do one boost-test interval. endtime = oldstarttime + test_boost_duration * HZ; while (time_before(jiffies, endtime)) { - /* If we don't have a callback in flight, post one. 
*/ - if (!smp_load_acquire(&rbi.inflight)) { - /* RCU core before ->inflight = 1. */ - smp_store_release(&rbi.inflight, 1); - cur_ops->call(&rbi.rcu, rcu_torture_boost_cb); - /* Check if the boost test failed */ - if (!firsttime && !failed) - failed = rcu_torture_boost_failed(call_rcu_time, jiffies); - call_rcu_time = jiffies; - firsttime = false; + // Has current GP gone too long? + if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) + failed = rcu_torture_boost_failed(gp_state, &gp_state_time); + // If we don't have a grace period in flight, start one. + if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { + gp_state = cur_ops->start_gp_poll(); + gp_initiated = true; + gp_state_time = jiffies; } - if (stutter_wait("rcu_torture_boost")) + if (stutter_wait("rcu_torture_boost")) { sched_set_fifo_low(current); + // If the grace period already ended, + // we don't know when that happened, so + // start over. + if (cur_ops->poll_gp_state(gp_state)) + gp_initiated = false; + } if (torture_must_stop()) goto checkwait; } - /* - * If boost never happened, then inflight will always be 1, in - * this case the boost check would never happen in the above - * loop so do another one here. - */ - if (!firsttime && !failed && smp_load_acquire(&rbi.inflight)) - rcu_torture_boost_failed(call_rcu_time, jiffies); + // In case the grace period extended beyond the end of the loop. + if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) + rcu_torture_boost_failed(gp_state, &gp_state_time); /* * Set the start time of the next test interval. @@ -1014,11 +1028,12 @@ static int rcu_torture_boost(void *arg) * interval. Besides, we are running at RT priority, * so delays should be relatively rare. */ - while (oldstarttime == boost_starttime && - !kthread_should_stop()) { + while (oldstarttime == boost_starttime && !kthread_should_stop()) { if (mutex_trylock(&boost_mutex)) { - boost_starttime = jiffies + - test_boost_interval * HZ; + if (oldstarttime == boost_starttime) { + boost_starttime = jiffies + test_boost_interval * HZ; + n_rcu_torture_boosts++; + } mutex_unlock(&boost_mutex); break; } @@ -1030,15 +1045,11 @@ checkwait: if (stutter_wait("rcu_torture_boost")) sched_set_fifo_low(current); } while (!torture_must_stop()); - while (smp_load_acquire(&rbi.inflight)) - schedule_timeout_uninterruptible(1); // rcu_barrier() deadlocks. - /* Clean up and exit. */ - while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { + while (!kthread_should_stop()) { torture_shutdown_absorb("rcu_torture_boost"); schedule_timeout_uninterruptible(1); } - destroy_rcu_head_on_stack(&rbi.rcu); torture_kthread_stopping("rcu_torture_boost"); return 0; } @@ -1553,11 +1564,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, - rcu_read_lock_bh_held() || - rcu_read_lock_sched_held() || - srcu_read_lock_held(srcu_ctlp) || - rcu_read_lock_trace_held() || - torturing_tasks()); + !cur_ops->readlock_held || cur_ops->readlock_held()); if (p == NULL) { /* Wait for rcu_torture_writer to get underway */ rcutorture_one_extend(&readstate, 0, trsp, rtrsp); @@ -1831,10 +1838,10 @@ rcu_torture_stats_print(void) srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); wtp = READ_ONCE(writer_task); - pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", + pr_alert("??? 
Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", rcu_torture_writer_state_getname(), rcu_torture_writer_state, gp_seq, flags, - wtp == NULL ? ~0UL : wtp->state, + wtp == NULL ? ~0U : wtp->__state, wtp == NULL ? -1 : (int)task_cpu(wtp)); if (!splatted && wtp) { sched_show_task(wtp); @@ -1861,48 +1868,49 @@ rcu_torture_stats(void *arg) torture_shutdown_absorb("rcu_torture_stats"); } while (!torture_must_stop()); torture_kthread_stopping("rcu_torture_stats"); - - { - struct rcu_head *rhp; - struct kmem_cache *kcp; - static int z; - - kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); - rhp = kmem_cache_alloc(kcp, GFP_KERNEL); - pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); - pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); - mem_dump_obj(ZERO_SIZE_PTR); - pr_alert("mem_dump_obj(NULL):"); - mem_dump_obj(NULL); - pr_alert("mem_dump_obj(%px):", &rhp); - mem_dump_obj(&rhp); - pr_alert("mem_dump_obj(%px):", rhp); - mem_dump_obj(rhp); - pr_alert("mem_dump_obj(%px):", &rhp->func); - mem_dump_obj(&rhp->func); - pr_alert("mem_dump_obj(%px):", &z); - mem_dump_obj(&z); - kmem_cache_free(kcp, rhp); - kmem_cache_destroy(kcp); - rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); - pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); - pr_alert("mem_dump_obj(kmalloc %px):", rhp); - mem_dump_obj(rhp); - pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); - mem_dump_obj(&rhp->func); - kfree(rhp); - rhp = vmalloc(4096); - pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); - pr_alert("mem_dump_obj(vmalloc %px):", rhp); - mem_dump_obj(rhp); - pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); - mem_dump_obj(&rhp->func); - vfree(rhp); - } - return 0; } +/* Test mem_dump_obj() and friends. 
*/ +static void rcu_torture_mem_dump_obj(void) +{ + struct rcu_head *rhp; + struct kmem_cache *kcp; + static int z; + + kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); + rhp = kmem_cache_alloc(kcp, GFP_KERNEL); + pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); + pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); + mem_dump_obj(ZERO_SIZE_PTR); + pr_alert("mem_dump_obj(NULL):"); + mem_dump_obj(NULL); + pr_alert("mem_dump_obj(%px):", &rhp); + mem_dump_obj(&rhp); + pr_alert("mem_dump_obj(%px):", rhp); + mem_dump_obj(rhp); + pr_alert("mem_dump_obj(%px):", &rhp->func); + mem_dump_obj(&rhp->func); + pr_alert("mem_dump_obj(%px):", &z); + mem_dump_obj(&z); + kmem_cache_free(kcp, rhp); + kmem_cache_destroy(kcp); + rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); + pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); + pr_alert("mem_dump_obj(kmalloc %px):", rhp); + mem_dump_obj(rhp); + pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); + mem_dump_obj(&rhp->func); + kfree(rhp); + rhp = vmalloc(4096); + pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); + pr_alert("mem_dump_obj(vmalloc %px):", rhp); + mem_dump_obj(rhp); + pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); + mem_dump_obj(&rhp->func); + vfree(rhp); +} + static void rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) { @@ -2634,7 +2642,7 @@ static bool rcu_torture_can_boost(void) if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) return false; - if (!cur_ops->call) + if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) return false; prio = rcu_get_gp_kthreads_prio(); @@ -2642,7 +2650,7 @@ static bool rcu_torture_can_boost(void) return false; if (prio < 2) { - if (boost_warn_once == 1) + if (boost_warn_once == 1) return false; pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); @@ -2818,6 +2826,8 @@ rcu_torture_cleanup(void) if (cur_ops->cleanup != NULL) cur_ops->cleanup(); + rcu_torture_mem_dump_obj(); + rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ if (err_segs_recorded) { @@ -3120,6 +3130,21 @@ rcu_torture_init(void) if (firsterr < 0) goto unwind; rcutor_hp = firsterr; + + // Testing RCU priority boosting requires rcutorture do + // some serious abuse. Counter this by running ksoftirqd + // at higher priority. 
+ if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { + for_each_online_cpu(cpu) { + struct sched_param sp; + struct task_struct *t; + + t = per_cpu(ksoftirqd, cpu); + WARN_ON_ONCE(!t); + sp.sched_priority = 2; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); + } + } } shutdown_jiffies = jiffies + shutdown_secs * HZ; firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 02dd9767b559..313d4547cbc7 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -362,6 +362,111 @@ static struct ref_scale_ops rwsem_ops = { .name = "rwsem" }; +// Definitions for global spinlock +static DEFINE_SPINLOCK(test_lock); + +static void ref_lock_section(const int nloops) +{ + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + spin_lock(&test_lock); + spin_unlock(&test_lock); + } + preempt_enable(); +} + +static void ref_lock_delay_section(const int nloops, const int udl, const int ndl) +{ + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + spin_lock(&test_lock); + un_delay(udl, ndl); + spin_unlock(&test_lock); + } + preempt_enable(); +} + +static struct ref_scale_ops lock_ops = { + .readsection = ref_lock_section, + .delaysection = ref_lock_delay_section, + .name = "lock" +}; + +// Definitions for global irq-save spinlock + +static void ref_lock_irq_section(const int nloops) +{ + unsigned long flags; + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + spin_lock_irqsave(&test_lock, flags); + spin_unlock_irqrestore(&test_lock, flags); + } + preempt_enable(); +} + +static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl) +{ + unsigned long flags; + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + spin_lock_irqsave(&test_lock, flags); + un_delay(udl, ndl); + spin_unlock_irqrestore(&test_lock, flags); + } + preempt_enable(); +} + +static struct ref_scale_ops lock_irq_ops = { + .readsection = ref_lock_irq_section, + .delaysection = ref_lock_irq_delay_section, + .name = "lock-irq" +}; + +// Definitions acquire-release. 
+static DEFINE_PER_CPU(unsigned long, test_acqrel); + +static void ref_acqrel_section(const int nloops) +{ + unsigned long x; + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + x = smp_load_acquire(this_cpu_ptr(&test_acqrel)); + smp_store_release(this_cpu_ptr(&test_acqrel), x + 1); + } + preempt_enable(); +} + +static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl) +{ + unsigned long x; + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + x = smp_load_acquire(this_cpu_ptr(&test_acqrel)); + un_delay(udl, ndl); + smp_store_release(this_cpu_ptr(&test_acqrel), x + 1); + } + preempt_enable(); +} + +static struct ref_scale_ops acqrel_ops = { + .readsection = ref_acqrel_section, + .delaysection = ref_acqrel_delay_section, + .name = "acqrel" +}; + static void rcu_scale_one_reader(void) { if (readdelay <= 0) @@ -653,8 +758,8 @@ ref_scale_init(void) long i; int firsterr = 0; static struct ref_scale_ops *scale_ops[] = { - &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, - &refcnt_ops, &rwlock_ops, &rwsem_ops, + &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops, + &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, }; if (!torture_init_begin(scale_type, verbose)) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index e26547b34ad3..6833d8887181 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -80,7 +80,7 @@ do { \ * srcu_read_unlock() running against them. So if the is_static parameter * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. */ -static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) +static void init_srcu_struct_nodes(struct srcu_struct *ssp) { int cpu; int i; @@ -90,6 +90,9 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) struct srcu_node *snp; struct srcu_node *snp_first; + /* Initialize geometry if it has not already been initialized. */ + rcu_init_geometry(); + /* Work out the overall tree geometry. */ ssp->level[0] = &ssp->node[0]; for (i = 1; i < rcu_num_lvls; i++) @@ -148,14 +151,6 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) timer_setup(&sdp->delay_work, srcu_delay_timer, 0); sdp->ssp = ssp; sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); - if (is_static) - continue; - - /* Dynamically allocated, better be no srcu_read_locks()! */ - for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) { - sdp->srcu_lock_count[i] = 0; - sdp->srcu_unlock_count[i] = 0; - } } } @@ -179,7 +174,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) ssp->sda = alloc_percpu(struct srcu_data); if (!ssp->sda) return -ENOMEM; - init_srcu_struct_nodes(ssp, is_static); + init_srcu_struct_nodes(ssp); ssp->srcu_gp_seq_needed_exp = 0; ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */ @@ -777,9 +772,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp) spin_unlock_irqrestore_rcu_node(sdp, flags); /* - * No local callbacks, so probabalistically probe global state. + * No local callbacks, so probabilistically probe global state. * Exact information would require acquiring locks, which would - * kill scalability, hence the probabalistic nature of the probe. + * kill scalability, hence the probabilistic nature of the probe. */ /* First, see if enough time has passed since the last GP. 
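The srcu_struct initialization reworked above (init_srcu_struct_nodes() now calling rcu_init_geometry() itself) backs the usual SRCU reader/updater pattern. A short sketch of that pattern for a dynamically initialized srcu_struct; the cfg structure and functions are illustrative, and teardown is omitted.

#include <linux/srcu.h>
#include <linux/slab.h>

struct cfg {
	int value;
};

static struct srcu_struct cfg_srcu;
static struct cfg __rcu *cur_cfg;

static int cfg_setup(void)
{
	return init_srcu_struct(&cfg_srcu);	/* sets up the per-CPU/node state shown above */
}

static int cfg_read_value(void)
{
	struct cfg *c;
	int idx, ret = -1;

	idx = srcu_read_lock(&cfg_srcu);
	c = srcu_dereference(cur_cfg, &cfg_srcu);
	if (c)
		ret = c->value;
	srcu_read_unlock(&cfg_srcu, idx);
	return ret;
}

static void cfg_replace(struct cfg *newc)
{
	struct cfg *old;

	old = rcu_dereference_protected(cur_cfg, true);	/* caller serializes updates */
	rcu_assign_pointer(cur_cfg, newc);
	synchronize_srcu(&cfg_srcu);	/* wait for readers still using @old */
	kfree(old);
}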
*/ @@ -1000,6 +995,9 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are * passed the same srcu_struct structure. * + * Implementation of these memory-ordering guarantees is similar to + * that of synchronize_rcu(). + * * If SRCU is likely idle, expedite the first request. This semantic * was provided by Classic SRCU, and is relied upon by its users, so TREE * SRCU must also provide it. Note that detecting idleness is heuristic @@ -1392,11 +1390,15 @@ void __init srcu_init(void) { struct srcu_struct *ssp; + /* + * Once that is set, call_srcu() can follow the normal path and + * queue delayed work. This must follow RCU workqueues creation + * and timers initialization. + */ srcu_init_done = true; while (!list_empty(&srcu_boot_list)) { ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, work.work.entry); - check_init_srcu_struct(ssp); list_del_init(&ssp->work.work.entry); queue_work(rcu_gp_wq, &ssp->work.work); } diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c index d4558ab7a07d..33d896d85902 100644 --- a/kernel/rcu/sync.c +++ b/kernel/rcu/sync.c @@ -94,9 +94,9 @@ static void rcu_sync_func(struct rcu_head *rhp) rcu_sync_call(rsp); } else { /* - * We're at least a GP after the last rcu_sync_exit(); eveybody + * We're at least a GP after the last rcu_sync_exit(); everybody * will now have observed the write side critical section. - * Let 'em rip!. + * Let 'em rip! */ WRITE_ONCE(rsp->gp_state, GP_IDLE); } diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 350ebf5051f9..03a118d1c003 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -23,7 +23,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. * @cbs_head: Head of callback list. * @cbs_tail: Tail pointer for callback list. - * @cbs_wq: Wait queue allowning new callback to get kthread's attention. + * @cbs_wq: Wait queue allowing new callback to get kthread's attention. * @cbs_lock: Lock protecting callback list. * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. * @gp_func: This flavor's grace-period-wait function. @@ -377,6 +377,46 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) // Finally, this implementation does not support high call_rcu_tasks() // rates from multiple CPUs. If this is required, per-CPU callback lists // will be needed. +// +// The implementation uses rcu_tasks_wait_gp(), which relies on function +// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() +// function sets these function pointers up so that rcu_tasks_wait_gp() +// invokes these functions in this order: +// +// rcu_tasks_pregp_step(): +// Invokes synchronize_rcu() in order to wait for all in-flight +// t->on_rq and t->nvcsw transitions to complete. This works because +// all such transitions are carried out with interrupts disabled. +// rcu_tasks_pertask(), invoked on every non-idle task: +// For every runnable non-idle task other than the current one, use +// get_task_struct() to pin down that task, snapshot that task's +// number of voluntary context switches, and add that task to the +// holdout list. +// rcu_tasks_postscan(): +// Invoke synchronize_srcu() to ensure that all tasks that were +// in the process of exiting (and which thus might not know to +// synchronize with this RCU Tasks grace period) have completed +// exiting. 
+// check_all_holdout_tasks(), repeatedly until holdout list is empty: +// Scans the holdout list, attempting to identify a quiescent state +// for each task on the list. If there is a quiescent state, the +// corresponding task is removed from the holdout list. +// rcu_tasks_postgp(): +// Invokes synchronize_rcu() in order to ensure that all prior +// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks +// to have happened before the end of this RCU Tasks grace period. +// Again, this works because all such transitions are carried out +// with interrupts disabled. +// +// For each exiting task, the exit_tasks_rcu_start() and +// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU +// read-side critical sections waited for by rcu_tasks_postscan(). +// +// Pre-grace-period update-side code is ordered before the grace via the +// ->cbs_lock and the smp_mb__after_spinlock(). Pre-grace-period read-side +// code is ordered before the grace period via synchronize_rcu() call +// in rcu_tasks_pregp_step() and by the scheduler's locks and interrupt +// disabling. /* Pre-grace-period preparation. */ static void rcu_tasks_pregp_step(void) @@ -504,7 +544,7 @@ DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); * or transition to usermode execution. As such, there are no read-side * primitives analogous to rcu_read_lock() and rcu_read_unlock() because * this primitive is intended to determine that all tasks have passed - * through a safe state, not so much for data-strcuture synchronization. + * through a safe state, not so much for data-structure synchronization. * * See the description of call_rcu() for more detailed information on * memory ordering guarantees. @@ -605,8 +645,13 @@ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } // passing an empty function to schedule_on_each_cpu(). This approach // provides an asynchronous call_rcu_tasks_rude() API and batching // of concurrent calls to the synchronous synchronize_rcu_rude() API. -// This sends IPIs far and wide and induces otherwise unnecessary context -// switches on all online CPUs, whether idle or not. +// This invokes schedule_on_each_cpu() in order to send IPIs far and wide +// and induces otherwise unnecessary context switches on all online CPUs, +// whether idle or not. +// +// Callback handling is provided by the rcu_tasks_kthread() function. +// +// Ordering is provided by the scheduler's context-switch code. // Empty function to allow workqueues to force a context switch. static void rcu_tasks_be_rude(struct work_struct *work) @@ -637,7 +682,7 @@ DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, * there are no read-side primitives analogous to rcu_read_lock() and * rcu_read_unlock() because this primitive is intended to determine * that all tasks have passed through a safe state, not so much for - * data-strcuture synchronization. + * data-structure synchronization. * * See the description of call_rcu() for more detailed information on * memory ordering guarantees. @@ -1163,7 +1208,7 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t) * there are no read-side primitives analogous to rcu_read_lock() and * rcu_read_unlock() because this primitive is intended to determine * that all tasks have passed through a safe state, not so much for - * data-strcuture synchronization. + * data-structure synchronization. * * See the description of call_rcu() for more detailed information on * memory ordering guarantees. 
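/*
 * [Editor's sketch -- not part of the kernel/rcu/tasks.h patch above.]
 * The block comment added by this patch walks through the Tasks-RCU
 * grace-period steps (pregp -> pertask -> postscan -> holdout scan ->
 * postgp).  A minimal caller sketch follows: an old code trampoline is
 * released only via call_rcu_tasks(), so it cannot be freed while any
 * task remains preempted inside it.  The names old_tramp,
 * old_tramp_free_cb() and old_tramp_retire() are invented for
 * illustration; only call_rcu_tasks() and its rcu_callback_t signature
 * are the real kernel API described above.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct old_tramp {
	struct rcu_head rh;	/* handed to call_rcu_tasks() */
	void *text;		/* code region being retired (hypothetical) */
};

static void old_tramp_free_cb(struct rcu_head *rhp)
{
	struct old_tramp *otp = container_of(rhp, struct old_tramp, rh);

	/*
	 * Every task has voluntarily switched, run in userspace, or gone
	 * idle since old_tramp_retire(), so no task can still be executing
	 * in otp->text -- exactly the guarantee the steps above provide.
	 */
	kfree(otp->text);	/* assumes text was kmalloc()ed (hypothetical) */
	kfree(otp);
}

static void old_tramp_retire(struct old_tramp *otp)
{
	call_rcu_tasks(&otp->rh, old_tramp_free_cb);
}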
@@ -1356,5 +1401,4 @@ void __init rcu_init_tasks_generic(void) #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ static inline void rcu_tasks_bootup_oddness(void) {} -void show_rcu_tasks_gp_kthreads(void) {} #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index c8a029fbb114..340b3f8b090d 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -221,5 +221,4 @@ void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); rcu_early_boot_tests(); - srcu_init(); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8e78b2430c16..51f24ecd94b2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -32,6 +32,8 @@ #include <linux/export.h> #include <linux/completion.h> #include <linux/moduleparam.h> +#include <linux/panic.h> +#include <linux/panic_notifier.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/cpu.h> @@ -186,6 +188,17 @@ module_param(rcu_unlock_delay, int, 0444); static int rcu_min_cached_objs = 5; module_param(rcu_min_cached_objs, int, 0444); +// A page shrinker can ask for pages to be freed to make them +// available for other parts of the system. This usually happens +// under low memory conditions, and in that case we should also +// defer page-cache filling for a short time period. +// +// The default value is 5 seconds, which is long enough to reduce +// interference with the shrinker while it asks other systems to +// drain their caches. +static int rcu_delay_page_cache_fill_msec = 5000; +module_param(rcu_delay_page_cache_fill_msec, int, 0444); + /* Retrieve RCU kthreads priority for rcutorture */ int rcu_get_gp_kthreads_prio(void) { @@ -202,7 +215,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); * the need for long delays to increase some race probabilities with the * need for fast grace periods to increase other race probabilities. */ -#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ +#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */ /* * Compute the mask of online CPUs for the specified rcu_node structure. @@ -242,6 +255,7 @@ void rcu_softirq_qs(void) { rcu_qs(); rcu_preempt_deferred_qs(current); + rcu_tasks_qs(current, false); } /* @@ -833,28 +847,6 @@ void noinstr rcu_irq_exit(void) rcu_nmi_exit(); } -/** - * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq - * towards in kernel preemption - * - * Same as rcu_irq_exit() but has a sanity check that scheduling is safe - * from RCU point of view. Invoked from return from interrupt before kernel - * preemption. 
- */ -void rcu_irq_exit_preempt(void) -{ - lockdep_assert_irqs_disabled(); - rcu_nmi_exit(); - - RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, - "RCU dynticks_nesting counter underflow/zero!"); - RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != - DYNTICK_IRQ_NONIDLE, - "Bad RCU dynticks_nmi_nesting counter\n"); - RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), - "RCU in extended quiescent state!"); -} - #ifdef CONFIG_PROVE_RCU /** * rcu_irq_exit_check_preempt - Validate that scheduling is possible @@ -959,7 +951,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit); */ void noinstr rcu_user_exit(void) { - rcu_eqs_exit(1); + rcu_eqs_exit(true); } /** @@ -1225,7 +1217,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ /* - * We are reporting a quiescent state on behalf of some other CPU, so + * When trying to report a quiescent state on behalf of some other CPU, * it is our responsibility to check for and handle potential overflow * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. * After all, the CPU might be in deep idle state, and thus executing no @@ -2048,7 +2040,7 @@ static void rcu_gp_fqs_loop(void) /* * Clean up after the old grace period. */ -static void rcu_gp_cleanup(void) +static noinline void rcu_gp_cleanup(void) { int cpu; bool needgp = false; @@ -2489,7 +2481,7 @@ int rcutree_dead_cpu(unsigned int cpu) /* * Invoke any RCU callbacks that have made it to the end of their grace - * period. Thottle as specified by rdp->blimit. + * period. Throttle as specified by rdp->blimit. */ static void rcu_do_batch(struct rcu_data *rdp) { @@ -2629,7 +2621,7 @@ static void rcu_do_batch(struct rcu_data *rdp) * state, for example, user mode or idle loop. It also schedules RCU * core processing. If the current grace period has gone on too long, * it will ask the scheduler to manufacture a context switch for the sole - * purpose of providing a providing the needed quiescent state. + * purpose of providing the needed quiescent state. */ void rcu_sched_clock_irq(int user) { @@ -2911,7 +2903,6 @@ static int __init rcu_spawn_core_kthreads(void) "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); return 0; } -early_initcall(rcu_spawn_core_kthreads); /* * Handle any core-RCU processing required by a call_rcu() invocation. @@ -3082,12 +3073,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) * period elapses, in other words after all pre-existing RCU read-side * critical sections have completed. However, the callback function * might well execute concurrently with RCU read-side critical sections - * that started after call_rcu() was invoked. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and - * may be nested. In addition, regions of code across which interrupts, - * preemption, or softirqs have been disabled also serve as RCU read-side - * critical sections. This includes hardware interrupt handlers, softirq - * handlers, and NMI handlers. + * that started after call_rcu() was invoked. + * + * RCU read-side critical sections are delimited by rcu_read_lock() + * and rcu_read_unlock(), and may be nested. In addition, but only in + * v5.0 and later, regions of code across which interrupts, preemption, + * or softirqs have been disabled also serve as RCU read-side critical + * sections. This includes hardware interrupt handlers, softirq handlers, + * and NMI handlers. 
* * Note that all CPUs must agree that the grace period extended beyond * all pre-existing RCU read-side critical section. On systems with more @@ -3107,6 +3100,9 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) * between the call to call_rcu() and the invocation of "func()" -- even * if CPU A and CPU B are the same CPU (but again only if the system has * more than one CPU). + * + * Implementation of these memory-ordering guarantees is described here: + * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. */ void call_rcu(struct rcu_head *head, rcu_callback_t func) { @@ -3171,6 +3167,7 @@ struct kfree_rcu_cpu_work { * Even though it is lockless an access has to be protected by the * per-cpu lock. * @page_cache_work: A work to refill the cache when it is empty + * @backoff_page_cache_fill: Delay cache refills * @work_in_progress: Indicates that page_cache_work is running * @hrtimer: A hrtimer for scheduling a page_cache_work * @nr_bkv_objs: number of allocated objects at @bkvcache. @@ -3190,7 +3187,8 @@ struct kfree_rcu_cpu { bool initialized; int count; - struct work_struct page_cache_work; + struct delayed_work page_cache_work; + atomic_t backoff_page_cache_fill; atomic_t work_in_progress; struct hrtimer hrtimer; @@ -3237,7 +3235,7 @@ get_cached_bnode(struct kfree_rcu_cpu *krcp) if (!krcp->nr_bkv_objs) return NULL; - krcp->nr_bkv_objs--; + WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); return (struct kvfree_rcu_bulk_data *) llist_del_first(&krcp->bkvcache); } @@ -3251,14 +3249,33 @@ put_cached_bnode(struct kfree_rcu_cpu *krcp, return false; llist_add((struct llist_node *) bnode, &krcp->bkvcache); - krcp->nr_bkv_objs++; + WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); return true; +} + +static int +drain_page_cache(struct kfree_rcu_cpu *krcp) +{ + unsigned long flags; + struct llist_node *page_list, *pos, *n; + int freed = 0; + raw_spin_lock_irqsave(&krcp->lock, flags); + page_list = llist_del_all(&krcp->bkvcache); + WRITE_ONCE(krcp->nr_bkv_objs, 0); + raw_spin_unlock_irqrestore(&krcp->lock, flags); + + llist_for_each_safe(pos, n, page_list) { + free_page((unsigned long)pos); + freed++; + } + + return freed; } /* * This function is invoked in workqueue context after a grace period. - * It frees all the objects queued on ->bhead_free or ->head_free. + * It frees all the objects queued on ->bkvhead_free or ->head_free. */ static void kfree_rcu_work(struct work_struct *work) { @@ -3285,7 +3302,7 @@ static void kfree_rcu_work(struct work_struct *work) krwp->head_free = NULL; raw_spin_unlock_irqrestore(&krcp->lock, flags); - // Handle two first channels. + // Handle the first two channels. for (i = 0; i < FREE_N_CHANNELS; i++) { for (; bkvhead[i]; bkvhead[i] = bnext) { bnext = bkvhead[i]->next; @@ -3323,9 +3340,11 @@ static void kfree_rcu_work(struct work_struct *work) } /* - * Emergency case only. It can happen under low memory - * condition when an allocation gets failed, so the "bulk" - * path can not be temporary maintained. + * This is used when the "bulk" path can not be used for the + * double-argument of kvfree_rcu(). This happens when the + * page-cache is empty, which means that objects are instead + * queued on a linked list through their rcu_head structures. + * This list is named "Channel 3". */ for (; head; head = next) { unsigned long offset = (unsigned long)head->func; @@ -3345,34 +3364,31 @@ static void kfree_rcu_work(struct work_struct *work) } /* - * Schedule the kfree batch RCU work to run in workqueue context after a GP. 
- * - * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES - * timeout has been reached. + * This function is invoked after the KFREE_DRAIN_JIFFIES timeout. */ -static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp) +static void kfree_rcu_monitor(struct work_struct *work) { - struct kfree_rcu_cpu_work *krwp; - bool repeat = false; + struct kfree_rcu_cpu *krcp = container_of(work, + struct kfree_rcu_cpu, monitor_work.work); + unsigned long flags; int i, j; - lockdep_assert_held(&krcp->lock); + raw_spin_lock_irqsave(&krcp->lock, flags); + // Attempt to start a new batch. for (i = 0; i < KFREE_N_BATCHES; i++) { - krwp = &(krcp->krw_arr[i]); + struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); - /* - * Try to detach bkvhead or head and attach it over any - * available corresponding free channel. It can be that - * a previous RCU batch is in progress, it means that - * immediately to queue another one is not possible so - * return false to tell caller to retry. - */ + // Try to detach bkvhead or head and attach it over any + // available corresponding free channel. It can be that + // a previous RCU batch is in progress, it means that + // immediately to queue another one is not possible so + // in that case the monitor work is rearmed. if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) || (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) || (krcp->head && !krwp->head_free)) { - // Channel 1 corresponds to SLAB ptrs. - // Channel 2 corresponds to vmalloc ptrs. + // Channel 1 corresponds to the SLAB-pointer bulk path. + // Channel 2 corresponds to vmalloc-pointer bulk path. for (j = 0; j < FREE_N_CHANNELS; j++) { if (!krwp->bkvhead_free[j]) { krwp->bkvhead_free[j] = krcp->bkvhead[j]; @@ -3380,7 +3396,8 @@ static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp) } } - // Channel 3 corresponds to emergency path. + // Channel 3 corresponds to both SLAB and vmalloc + // objects queued on the linked list. if (!krwp->head_free) { krwp->head_free = krcp->head; krcp->head = NULL; @@ -3388,65 +3405,35 @@ static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp) WRITE_ONCE(krcp->count, 0); - /* - * One work is per one batch, so there are three - * "free channels", the batch can handle. It can - * be that the work is in the pending state when - * channels have been detached following by each - * other. - */ + // One work is per one batch, so there are three + // "free channels", the batch can handle. It can + // be that the work is in the pending state when + // channels have been detached following by each + // other. queue_rcu_work(system_wq, &krwp->rcu_work); } - - // Repeat if any "free" corresponding channel is still busy. - if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) - repeat = true; } - return !repeat; -} - -static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp, - unsigned long flags) -{ - // Attempt to start a new batch. - krcp->monitor_todo = false; - if (queue_kfree_rcu_work(krcp)) { - // Success! Our job is done here. - raw_spin_unlock_irqrestore(&krcp->lock, flags); - return; - } + // If there is nothing to detach, it means that our job is + // successfully done here. In case of having at least one + // of the channels that is still busy we should rearm the + // work to repeat an attempt. Because previous batches are + // still in progress. 
+ if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) + krcp->monitor_todo = false; + else + schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); - // Previous RCU batch still in progress, try again later. - krcp->monitor_todo = true; - schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); raw_spin_unlock_irqrestore(&krcp->lock, flags); } -/* - * This function is invoked after the KFREE_DRAIN_JIFFIES timeout. - * It invokes kfree_rcu_drain_unlock() to attempt to start another batch. - */ -static void kfree_rcu_monitor(struct work_struct *work) -{ - unsigned long flags; - struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu, - monitor_work.work); - - raw_spin_lock_irqsave(&krcp->lock, flags); - if (krcp->monitor_todo) - kfree_rcu_drain_unlock(krcp, flags); - else - raw_spin_unlock_irqrestore(&krcp->lock, flags); -} - static enum hrtimer_restart schedule_page_work_fn(struct hrtimer *t) { struct kfree_rcu_cpu *krcp = container_of(t, struct kfree_rcu_cpu, hrtimer); - queue_work(system_highpri_wq, &krcp->page_cache_work); + queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); return HRTIMER_NORESTART; } @@ -3455,12 +3442,16 @@ static void fill_page_cache_func(struct work_struct *work) struct kvfree_rcu_bulk_data *bnode; struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu, - page_cache_work); + page_cache_work.work); unsigned long flags; + int nr_pages; bool pushed; int i; - for (i = 0; i < rcu_min_cached_objs; i++) { + nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? + 1 : rcu_min_cached_objs; + + for (i = 0; i < nr_pages; i++) { bnode = (struct kvfree_rcu_bulk_data *) __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); @@ -3477,6 +3468,7 @@ static void fill_page_cache_func(struct work_struct *work) } atomic_set(&krcp->work_in_progress, 0); + atomic_set(&krcp->backoff_page_cache_fill, 0); } static void @@ -3484,10 +3476,15 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp) { if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && !atomic_xchg(&krcp->work_in_progress, 1)) { - hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - krcp->hrtimer.function = schedule_page_work_fn; - hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); + if (atomic_read(&krcp->backoff_page_cache_fill)) { + queue_delayed_work(system_wq, + &krcp->page_cache_work, + msecs_to_jiffies(rcu_delay_page_cache_fill_msec)); + } else { + hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + krcp->hrtimer.function = schedule_page_work_fn; + hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); + } } } @@ -3552,11 +3549,11 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, } /* - * Queue a request for lazy invocation of appropriate free routine after a - * grace period. Please note there are three paths are maintained, two are the - * main ones that use array of pointers interface and third one is emergency - * one, that is used only when the main path can not be maintained temporary, - * due to memory pressure. + * Queue a request for lazy invocation of the appropriate free routine + * after a grace period. Please note that three paths are maintained, + * two for the common case using arrays of pointers and a third one that + * is used only when the main paths cannot be used, for example, due to + * memory pressure. * * Each kvfree_call_rcu() request is added to a batch. The batch will be drained * every KFREE_DRAIN_JIFFIES number of jiffies. 
All the objects in the batch will @@ -3645,6 +3642,8 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); count += READ_ONCE(krcp->count); + count += READ_ONCE(krcp->nr_bkv_objs); + atomic_set(&krcp->backoff_page_cache_fill, 1); } return count; @@ -3654,18 +3653,14 @@ static unsigned long kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { int cpu, freed = 0; - unsigned long flags; for_each_possible_cpu(cpu) { int count; struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); count = krcp->count; - raw_spin_lock_irqsave(&krcp->lock, flags); - if (krcp->monitor_todo) - kfree_rcu_drain_unlock(krcp, flags); - else - raw_spin_unlock_irqrestore(&krcp->lock, flags); + count += drain_page_cache(krcp); + kfree_rcu_monitor(&krcp->monitor_work.work); sc->nr_to_scan -= count; freed += count; @@ -3693,7 +3688,8 @@ void __init kfree_rcu_scheduler_running(void) struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); raw_spin_lock_irqsave(&krcp->lock, flags); - if (!krcp->head || krcp->monitor_todo) { + if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) || + krcp->monitor_todo) { raw_spin_unlock_irqrestore(&krcp->lock, flags); continue; } @@ -3750,10 +3746,12 @@ static int rcu_blocking_is_gp(void) * read-side critical sections have completed. Note, however, that * upon return from synchronize_rcu(), the caller might well be executing * concurrently with new RCU read-side critical sections that began while - * synchronize_rcu() was waiting. RCU read-side critical sections are - * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. - * In addition, regions of code across which interrupts, preemption, or - * softirqs have been disabled also serve as RCU read-side critical + * synchronize_rcu() was waiting. + * + * RCU read-side critical sections are delimited by rcu_read_lock() + * and rcu_read_unlock(), and may be nested. In addition, but only in + * v5.0 and later, regions of code across which interrupts, preemption, + * or softirqs have been disabled also serve as RCU read-side critical * sections. This includes hardware interrupt handlers, softirq handlers, * and NMI handlers. * @@ -3774,6 +3772,9 @@ static int rcu_blocking_is_gp(void) * to have executed a full memory barrier during the execution of * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but * again only if the system has more than one CPU). + * + * Implementation of these memory-ordering guarantees is described here: + * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. */ void synchronize_rcu(void) { @@ -3844,11 +3845,11 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu); /** * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period * - * @oldstate: return from call to get_state_synchronize_rcu() or start_poll_synchronize_rcu() + * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu() * * If a full RCU grace period has elapsed since the earlier call from * which oldstate was obtained, return @true, otherwise return @false. - * If @false is returned, it is the caller's responsibilty to invoke this + * If @false is returned, it is the caller's responsibility to invoke this * function later on until it does return @true. Alternatively, the caller * can explicitly wait for a grace period, for example, by passing @oldstate * to cond_synchronize_rcu() or by directly invoking synchronize_rcu(). 
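/*
 * [Editor's sketch -- not part of the kernel/rcu/tree.c patch above.]
 * The kerneldoc updated in the surrounding hunks describes the polled
 * grace-period API.  A minimal usage sketch, assuming a hypothetical
 * "struct foo": the update side snapshots a cookie with
 * start_poll_synchronize_rcu() when an object is retired, then frees it
 * once poll_state_synchronize_rcu() reports that a full grace period has
 * elapsed, or falls back to cond_synchronize_rcu() when reclaim must not
 * fail.  foo_retire(), foo_try_reclaim() and foo_reclaim() are invented
 * names; the start/poll/cond functions are the real API documented above.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	unsigned long gp_cookie;	/* from start_poll_synchronize_rcu() */
	/* ... payload ... */
};

static void foo_retire(struct foo *fp)
{
	/* Snapshot GP state, starting a grace period if none is in flight. */
	fp->gp_cookie = start_poll_synchronize_rcu();
}

/* Opportunistic reclaim: free only if a grace period already elapsed. */
static bool foo_try_reclaim(struct foo *fp)
{
	if (!poll_state_synchronize_rcu(fp->gp_cookie))
		return false;	/* caller retries later */
	kfree(fp);
	return true;
}

/* Unconditional reclaim: wait for the grace period rather than fail. */
static void foo_reclaim(struct foo *fp)
{
	cond_synchronize_rcu(fp->gp_cookie);	/* no-op if GP already elapsed */
	kfree(fp);
}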
@@ -3860,6 +3861,11 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu); * (many hours even on 32-bit systems) should check them occasionally * and either refresh them or set a flag indicating that the grace period * has completed. + * + * This function provides the same memory-ordering guarantees that + * would be provided by a synchronize_rcu() that was invoked at the call + * to the function that provided @oldstate, and that returned at the end + * of this function. */ bool poll_state_synchronize_rcu(unsigned long oldstate) { @@ -3874,7 +3880,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); /** * cond_synchronize_rcu - Conditionally wait for an RCU grace period * - * @oldstate: return value from earlier call to get_state_synchronize_rcu() + * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu() * * If a full RCU grace period has elapsed since the earlier call to * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return. @@ -3884,6 +3890,11 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); * counter wrap is harmless. If the counter wraps, we have waited for * more than 2 billion grace periods (and way more on a 64-bit system!), * so waiting for one additional grace period should be just fine. + * + * This function provides the same memory-ordering guarantees that + * would be provided by a synchronize_rcu() that was invoked at the call + * to the function that provided @oldstate, and that returned at the end + * of this function. */ void cond_synchronize_rcu(unsigned long oldstate) { @@ -3911,7 +3922,7 @@ static int rcu_pending(int user) check_cpu_stall(rdp); /* Does this CPU need a deferred NOCB wakeup? */ - if (rcu_nocb_need_deferred_wakeup(rdp)) + if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) return 1; /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ @@ -4094,7 +4105,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier); /* * Propagate ->qsinitmask bits up the rcu_node tree to account for the * first CPU in a given leaf rcu_node structure coming online. The caller - * must hold the corresponding leaf rcu_node ->lock with interrrupts + * must hold the corresponding leaf rcu_node ->lock with interrupts * disabled. */ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) @@ -4189,7 +4200,7 @@ int rcutree_prepare_cpu(unsigned int cpu) rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - rcu_prepare_kthreads(cpu); + rcu_spawn_one_boost_kthread(rnp); rcu_spawn_cpu_nocb_kthread(cpu); WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); @@ -4472,6 +4483,7 @@ static int __init rcu_spawn_gp_kthread(void) wake_up_process(t); rcu_spawn_nocb_kthreads(); rcu_spawn_boost_kthreads(); + rcu_spawn_core_kthreads(); return 0; } early_initcall(rcu_spawn_gp_kthread); @@ -4582,11 +4594,25 @@ static void __init rcu_init_one(void) * replace the definitions in tree.h because those are needed to size * the ->node array in the rcu_state structure. */ -static void __init rcu_init_geometry(void) +void rcu_init_geometry(void) { ulong d; int i; + static unsigned long old_nr_cpu_ids; int rcu_capacity[RCU_NUM_LVLS]; + static bool initialized; + + if (initialized) { + /* + * Warn if setup_nr_cpu_ids() had not yet been invoked, + * unless nr_cpus_ids == NR_CPUS, in which case who cares? 
+ */ + WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids); + return; + } + + old_nr_cpu_ids = nr_cpu_ids; + initialized = true; /* * Initialize any unspecified boot parameters. @@ -4687,6 +4713,18 @@ static void __init kfree_rcu_batch_init(void) int cpu; int i; + /* Clamp it to [0:100] seconds interval. */ + if (rcu_delay_page_cache_fill_msec < 0 || + rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) { + + rcu_delay_page_cache_fill_msec = + clamp(rcu_delay_page_cache_fill_msec, 0, + (int) (100 * MSEC_PER_SEC)); + + pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n", + rcu_delay_page_cache_fill_msec); + } + for_each_possible_cpu(cpu) { struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); @@ -4696,7 +4734,7 @@ static void __init kfree_rcu_batch_init(void) } INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); - INIT_WORK(&krcp->page_cache_work, fill_page_cache_func); + INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); krcp->initialized = true; } if (register_shrinker(&kfree_rcu_shrinker)) @@ -4730,12 +4768,11 @@ void __init rcu_init(void) rcutree_online_cpu(cpu); } - /* Create workqueue for expedited GPs and for Tree SRCU. */ + /* Create workqueue for Tree SRCU and for expedited GPs. */ rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); WARN_ON(!rcu_gp_wq); rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); WARN_ON(!rcu_par_gp_wq); - srcu_init(); /* Fill in default value for rcutree.qovld boot parameter. */ /* -After- the rcu_node ->lock fields are initialized! */ diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 71821d59d95c..305cf6aeb408 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -115,6 +115,7 @@ struct rcu_node { /* boosting for this rcu_node structure. */ unsigned int boost_kthread_status; /* State of boost_kthread_task for tracing. */ + unsigned long n_boosts; /* Number of boosts for this rcu_node structure. */ #ifdef CONFIG_RCU_NOCB_CPU struct swait_queue_head nocb_gp_wq[2]; /* Place for rcu_nocb_kthread() to wait GP. */ @@ -153,7 +154,7 @@ struct rcu_data { unsigned long gp_seq; /* Track rsp->gp_seq counter. */ unsigned long gp_seq_needed; /* Track furthest future GP request. */ union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ - bool core_needs_qs; /* Core waits for quiesc state. */ + bool core_needs_qs; /* Core waits for quiescent state. */ bool beenonline; /* CPU online at least once. */ bool gpwrap; /* Possible ->gp_seq wrap. */ bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */ @@ -218,7 +219,6 @@ struct rcu_data { /* The following fields are used by GP kthread, hence own cacheline. */ raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp; - struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */ u8 nocb_gp_sleep; /* Is the nocb GP thread asleep? */ u8 nocb_gp_bypass; /* Found a bypass on last scan? */ u8 nocb_gp_gp; /* GP to wait for on last scan? */ @@ -257,10 +257,10 @@ struct rcu_data { }; /* Values for nocb_defer_wakeup field in struct rcu_data. 
*/ -#define RCU_NOCB_WAKE_OFF -1 #define RCU_NOCB_WAKE_NOT 0 -#define RCU_NOCB_WAKE 1 -#define RCU_NOCB_WAKE_FORCE 2 +#define RCU_NOCB_WAKE_BYPASS 1 +#define RCU_NOCB_WAKE 2 +#define RCU_NOCB_WAKE_FORCE 3 #define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500)) /* For jiffies_till_first_fqs and */ @@ -417,8 +417,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static bool rcu_is_callbacks_kthread(void); static void rcu_cpu_kthread_setup(unsigned int cpu); +static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp); static void __init rcu_spawn_boost_kthreads(void); -static void rcu_prepare_kthreads(int cpu); static void rcu_cleanup_after_idle(void); static void rcu_prepare_for_idle(void); static bool rcu_preempt_has_tasks(struct rcu_node *rnp); @@ -434,7 +434,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, bool *was_alldone, unsigned long flags); static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, unsigned long flags); -static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp); +static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level); static bool do_nocb_deferred_wakeup(struct rcu_data *rdp); static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); static void rcu_spawn_cpu_nocb_kthread(int cpu); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index ad0156b86937..de1dc3bb7f70 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -33,10 +33,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp) return false; } -static inline bool rcu_running_nocb_timer(struct rcu_data *rdp) -{ - return (timer_curr_running(&rdp->nocb_timer) && !in_irq()); -} #else static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp) { @@ -48,11 +44,6 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp) return false; } -static inline bool rcu_running_nocb_timer(struct rcu_data *rdp) -{ - return false; -} - #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) @@ -72,8 +63,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) rcu_lockdep_is_held_nocb(rdp) || (rdp == this_cpu_ptr(&rcu_data) && !(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) || - rcu_current_is_nocb_kthread(rdp) || - rcu_running_nocb_timer(rdp)), + rcu_current_is_nocb_kthread(rdp)), "Unsafe read of RCU_NOCB offloaded state" ); @@ -1098,6 +1088,7 @@ static int rcu_boost(struct rcu_node *rnp) /* Lock only for side effect: boosts task t's priority. */ rt_mutex_lock(&rnp->boost_mtx); rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. 
*/ + rnp->n_boosts++; return READ_ONCE(rnp->exp_tasks) != NULL || READ_ONCE(rnp->boost_tasks) != NULL; @@ -1197,22 +1188,16 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) */ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) { - int rnp_index = rnp - rcu_get_root(); unsigned long flags; + int rnp_index = rnp - rcu_get_root(); struct sched_param sp; struct task_struct *t; - if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) - return; - - if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) + if (rnp->boost_kthread_task || !rcu_scheduler_fully_active) return; rcu_state.boost = 1; - if (rnp->boost_kthread_task != NULL) - return; - t = kthread_create(rcu_boost_kthread, (void *)rnp, "rcub/%d", rnp_index); if (WARN_ON_ONCE(IS_ERR(t))) @@ -1264,17 +1249,8 @@ static void __init rcu_spawn_boost_kthreads(void) struct rcu_node *rnp; rcu_for_each_leaf_node(rnp) - rcu_spawn_one_boost_kthread(rnp); -} - -static void rcu_prepare_kthreads(int cpu) -{ - struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - struct rcu_node *rnp = rdp->mynode; - - /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ - if (rcu_scheduler_fully_active) - rcu_spawn_one_boost_kthread(rnp); + if (rcu_rnp_online_cpus(rnp)) + rcu_spawn_one_boost_kthread(rnp); } #else /* #ifdef CONFIG_RCU_BOOST */ @@ -1294,15 +1270,15 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) { } -static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) { } -static void __init rcu_spawn_boost_kthreads(void) +static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) { } -static void rcu_prepare_kthreads(int cpu) +static void __init rcu_spawn_boost_kthreads(void) { } @@ -1535,13 +1511,10 @@ static void rcu_cleanup_after_idle(void) static int __init rcu_nocb_setup(char *str) { alloc_bootmem_cpumask_var(&rcu_nocb_mask); - if (!strcasecmp(str, "all")) /* legacy: use "0-N" instead */ + if (cpulist_parse(str, rcu_nocb_mask)) { + pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); cpumask_setall(rcu_nocb_mask); - else - if (cpulist_parse(str, rcu_nocb_mask)) { - pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); - cpumask_setall(rcu_nocb_mask); - } + } return 1; } __setup("rcu_nocbs=", rcu_nocb_setup); @@ -1692,56 +1665,78 @@ bool rcu_is_nocb_cpu(int cpu) return false; } -/* - * Kick the GP kthread for this NOCB group. Caller holds ->nocb_lock - * and this function releases it. 
- */ -static bool wake_nocb_gp(struct rcu_data *rdp, bool force, - unsigned long flags) - __releases(rdp->nocb_lock) +static bool __wake_nocb_gp(struct rcu_data *rdp_gp, + struct rcu_data *rdp, + bool force, unsigned long flags) + __releases(rdp_gp->nocb_gp_lock) { bool needwake = false; - struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; - lockdep_assert_held(&rdp->nocb_lock); if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { - rcu_nocb_unlock_irqrestore(rdp, flags); + raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("AlreadyAwake")); return false; } - if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) { - WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); - del_timer(&rdp->nocb_timer); + if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { + WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); + del_timer(&rdp_gp->nocb_timer); } - rcu_nocb_unlock_irqrestore(rdp, flags); - raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); + if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); needwake = true; - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); } raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); - if (needwake) + if (needwake) { + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); wake_up_process(rdp_gp->nocb_gp_kthread); + } return needwake; } /* + * Kick the GP kthread for this NOCB group. + */ +static bool wake_nocb_gp(struct rcu_data *rdp, bool force) +{ + unsigned long flags; + struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; + + raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); + return __wake_nocb_gp(rdp_gp, rdp, force, flags); +} + +/* * Arrange to wake the GP kthread for this NOCB group at some future * time when it is safe to do so. */ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, const char *reason) { - if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_OFF) - return; - if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) - mod_timer(&rdp->nocb_timer, jiffies + 1); - if (rdp->nocb_defer_wakeup < waketype) - WRITE_ONCE(rdp->nocb_defer_wakeup, waketype); + unsigned long flags; + struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; + + raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); + + /* + * Bypass wakeup overrides previous deferments. In case + * of callback storm, no need to wake up too early. + */ + if (waketype == RCU_NOCB_WAKE_BYPASS) { + mod_timer(&rdp_gp->nocb_timer, jiffies + 2); + WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); + } else { + if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE) + mod_timer(&rdp_gp->nocb_timer, jiffies + 1); + if (rdp_gp->nocb_defer_wakeup < waketype) + WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); + } + + raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); } @@ -1940,7 +1935,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, } /* - * Awaken the no-CBs grace-period kthead if needed, either due to it + * Awaken the no-CBs grace-period kthread if needed, either due to it * legitimately being asleep or due to overload conditions. * * If warranted, also wake up the kthread servicing this CPUs queues. @@ -1968,13 +1963,14 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, rdp->qlen_last_fqs_check = len; if (!irqs_disabled_flags(flags)) { /* ... if queue was empty ... 
*/ - wake_nocb_gp(rdp, false, flags); + rcu_nocb_unlock_irqrestore(rdp, flags); + wake_nocb_gp(rdp, false); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeEmpty")); } else { + rcu_nocb_unlock_irqrestore(rdp, flags); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred")); - rcu_nocb_unlock_irqrestore(rdp, flags); } } else if (len > rdp->qlen_last_fqs_check + qhimark) { /* ... or if many callbacks queued. */ @@ -1989,10 +1985,14 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, smp_mb(); /* Enqueue before timer_pending(). */ if ((rdp->nocb_cb_sleep || !rcu_segcblist_ready_cbs(&rdp->cblist)) && - !timer_pending(&rdp->nocb_bypass_timer)) + !timer_pending(&rdp->nocb_timer)) { + rcu_nocb_unlock_irqrestore(rdp, flags); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, TPS("WakeOvfIsDeferred")); - rcu_nocb_unlock_irqrestore(rdp, flags); + } else { + rcu_nocb_unlock_irqrestore(rdp, flags); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); + } } else { rcu_nocb_unlock_irqrestore(rdp, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); @@ -2000,18 +2000,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, return; } -/* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */ -static void do_nocb_bypass_wakeup_timer(struct timer_list *t) -{ - unsigned long flags; - struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer); - - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); - rcu_nocb_lock_irqsave(rdp, flags); - smp_mb__after_spinlock(); /* Timer expire before wakeup. */ - __call_rcu_nocb_wake(rdp, true, flags); -} - /* * Check if we ignore this rdp. * @@ -2118,11 +2106,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) bypass = true; } rnp = rdp->mynode; - if (bypass) { // Avoid race with first bypass CB. - WRITE_ONCE(my_rdp->nocb_defer_wakeup, - RCU_NOCB_WAKE_NOT); - del_timer(&my_rdp->nocb_timer); - } + // Advance callbacks if helpful and low contention. needwake_gp = false; if (!rcu_segcblist_restempty(&rdp->cblist, @@ -2168,12 +2152,12 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) my_rdp->nocb_gp_bypass = bypass; my_rdp->nocb_gp_gp = needwait_gp; my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; + if (bypass && !rcu_nocb_poll) { // At least one child with non-empty ->nocb_bypass, so set // timer in order to avoid stranding its callbacks. - raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); - mod_timer(&my_rdp->nocb_bypass_timer, j + 2); - raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); + wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS, + TPS("WakeBypassIsDeferred")); } if (rcu_nocb_poll) { /* Polling, so trace if first poll in the series. */ @@ -2197,8 +2181,10 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) } if (!rcu_nocb_poll) { raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); - if (bypass) - del_timer(&my_rdp->nocb_bypass_timer); + if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { + WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); + del_timer(&my_rdp->nocb_timer); + } WRITE_ONCE(my_rdp->nocb_gp_sleep, true); raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); } @@ -2334,25 +2320,27 @@ static int rcu_nocb_cb_kthread(void *arg) } /* Is a deferred wakeup of rcu_nocb_kthread() required? 
*/ -static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) +static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level) { - return READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT; + return READ_ONCE(rdp->nocb_defer_wakeup) >= level; } /* Do a deferred wakeup of rcu_nocb_kthread(). */ -static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp) +static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp, + struct rcu_data *rdp, int level, + unsigned long flags) + __releases(rdp_gp->nocb_gp_lock) { - unsigned long flags; int ndw; int ret; - rcu_nocb_lock_irqsave(rdp, flags); - if (!rcu_nocb_need_deferred_wakeup(rdp)) { - rcu_nocb_unlock_irqrestore(rdp, flags); + if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) { + raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); return false; } - ndw = READ_ONCE(rdp->nocb_defer_wakeup); - ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); + + ndw = rdp_gp->nocb_defer_wakeup; + ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); return ret; @@ -2361,9 +2349,15 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp) /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */ static void do_nocb_deferred_wakeup_timer(struct timer_list *t) { + unsigned long flags; struct rcu_data *rdp = from_timer(rdp, t, nocb_timer); - do_nocb_deferred_wakeup_common(rdp); + WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); + + raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags); + smp_mb__after_spinlock(); /* Timer expire before wakeup. */ + do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags); } /* @@ -2373,9 +2367,14 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t) */ static bool do_nocb_deferred_wakeup(struct rcu_data *rdp) { - if (rcu_nocb_need_deferred_wakeup(rdp)) - return do_nocb_deferred_wakeup_common(rdp); - return false; + unsigned long flags; + struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; + + if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE)) + return false; + + raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); + return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags); } void rcu_nocb_flush_deferred_wakeup(void) @@ -2443,17 +2442,15 @@ static long rcu_nocb_rdp_deoffload(void *arg) swait_event_exclusive(rdp->nocb_state_wq, !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP)); - rcu_nocb_lock_irqsave(rdp, flags); - /* Make sure nocb timer won't stay around */ - WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_OFF); - rcu_nocb_unlock_irqrestore(rdp, flags); - del_timer_sync(&rdp->nocb_timer); - /* - * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY with CB unlocked - * and IRQs disabled but let's be paranoid. + * Lock one last time to acquire latest callback updates from kthreads + * so we can later handle callbacks locally without locking. */ rcu_nocb_lock_irqsave(rdp, flags); + /* + * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb + * lock is released but how about being paranoid for once? 
+ */ rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY); /* * With SEGCBLIST_SOFTIRQ_ONLY, we can't use @@ -2473,10 +2470,6 @@ int rcu_nocb_cpu_deoffload(int cpu) struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); int ret = 0; - if (rdp == rdp->nocb_gp_rdp) { - pr_info("Can't deoffload an rdp GP leader (yet)\n"); - return -EINVAL; - } mutex_lock(&rcu_state.barrier_mutex); cpus_read_lock(); if (rcu_rdp_is_offloaded(rdp)) { @@ -2517,8 +2510,7 @@ static long rcu_nocb_rdp_offload(void *arg) * SEGCBLIST_SOFTIRQ_ONLY mode. */ raw_spin_lock_irqsave(&rdp->nocb_lock, flags); - /* Re-enable nocb timer */ - WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); + /* * We didn't take the nocb lock while working on the * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode. @@ -2626,7 +2618,6 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) raw_spin_lock_init(&rdp->nocb_bypass_lock); raw_spin_lock_init(&rdp->nocb_gp_lock); timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); - timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0); rcu_cblist_init(&rdp->nocb_bypass); } @@ -2768,7 +2759,7 @@ EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb); #ifdef CONFIG_SMP static char *show_rcu_should_be_on_cpu(struct task_struct *tsp) { - return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : ""; + return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : ""; } #else // #ifdef CONFIG_SMP static char *show_rcu_should_be_on_cpu(struct task_struct *tsp) @@ -2785,13 +2776,12 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp) { struct rcu_node *rnp = rdp->mynode; - pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n", + pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n", rdp->cpu, "kK"[!!rdp->nocb_gp_kthread], "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], "dD"[!!rdp->nocb_defer_wakeup], "tT"[timer_pending(&rdp->nocb_timer)], - "bB"[timer_pending(&rdp->nocb_bypass_timer)], "sS"[!!rdp->nocb_gp_sleep], ".W"[swait_active(&rdp->nocb_gp_wq)], ".W"[swait_active(&rnp->nocb_gp_wq[0])], @@ -2812,7 +2802,6 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) char bufr[20]; struct rcu_segcblist *rsclp = &rdp->cblist; bool waslocked; - bool wastimer; bool wassleep; if (rdp->nocb_gp_rdp == rdp) @@ -2849,15 +2838,13 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) return; waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock); - wastimer = timer_pending(&rdp->nocb_bypass_timer); wassleep = swait_active(&rdp->nocb_gp_wq); - if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep) - return; /* Nothing untowards. */ + if (!rdp->nocb_gp_sleep && !waslocked && !wassleep) + return; /* Nothing untoward. */ - pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n", + pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n", "lL"[waslocked], "dD"[!!rdp->nocb_defer_wakeup], - "tT"[wastimer], "sS"[!!rdp->nocb_gp_sleep], ".W"[wassleep]); } @@ -2922,7 +2909,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) { } -static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) +static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level) { return false; } diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 59b95cc5cbdf..3f937b20814f 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -314,6 +314,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) * tasks blocked within RCU read-side critical sections. 
*/ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags) + __releases(rnp->lock) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return 0; @@ -460,12 +461,12 @@ static void rcu_check_gp_kthread_starvation(void) if (rcu_is_gp_kthread_starving(&j)) { cpu = gpk ? task_cpu(gpk) : -1; - pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", + pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n", rcu_state.name, j, (long)rcu_seq_current(&rcu_state.gp_seq), data_race(rcu_state.gp_flags), gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, - gpk ? gpk->state : ~0, cpu); + gpk ? gpk->__state : ~0, cpu); if (gpk) { pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name); pr_err("RCU grace-period kthread stack dump:\n"); @@ -503,12 +504,12 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void) time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) && gpk && !READ_ONCE(gpk->on_rq)) { cpu = task_cpu(gpk); - pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx\n", + pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n", rcu_state.name, (jiffies - jiffies_fqs), (long)rcu_seq_current(&rcu_state.gp_seq), data_race(rcu_state.gp_flags), gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS, - gpk->state); + gpk->__state); pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n", cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu)); } @@ -717,6 +718,63 @@ static void check_cpu_stall(struct rcu_data *rdp) /* + * Check to see if a failure to end RCU priority inversion was due to + * a CPU not passing through a quiescent state. When this happens, there + * is nothing that RCU priority boosting can do to help, so we shouldn't + * count this as an RCU priority boosting failure. A return of true says + * RCU priority boosting is to blame, and false says otherwise. If false + * is returned, the first of the CPUs to blame is stored through cpup. + * If there was no CPU blocking the current grace period, but also nothing + * in need of being boosted, *cpup is set to -1. This can happen in case + * of vCPU preemption while the last CPU is reporting its quiscent state, + * for example. + * + * If cpup is NULL, then a lockless quick check is carried out, suitable + * for high-rate usage. On the other hand, if cpup is non-NULL, each + * rcu_node structure's ->lock is acquired, ruling out high-rate usage. + */ +bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) +{ + bool atb = false; + int cpu; + unsigned long flags; + struct rcu_node *rnp; + + rcu_for_each_leaf_node(rnp) { + if (!cpup) { + if (READ_ONCE(rnp->qsmask)) { + return false; + } else { + if (READ_ONCE(rnp->gp_tasks)) + atb = true; + continue; + } + } + *cpup = -1; + raw_spin_lock_irqsave_rcu_node(rnp, flags); + if (rnp->gp_tasks) + atb = true; + if (!rnp->qsmask) { + // No CPUs without quiescent states for this rnp. + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + continue; + } + // Find the first holdout CPU. + for_each_leaf_node_possible_cpu(rnp, cpu) { + if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) { + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + *cpup = cpu; + return false; + } + } + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } + // Can't blame CPUs, so must blame RCU priority boosting. + return atb; +} +EXPORT_SYMBOL_GPL(rcu_check_boost_fail); + +/* * Show the state of the grace-period kthreads. 
*/ void show_rcu_gp_kthreads(void) @@ -726,6 +784,7 @@ void show_rcu_gp_kthreads(void) unsigned long j; unsigned long ja; unsigned long jr; + unsigned long js; unsigned long jw; struct rcu_data *rdp; struct rcu_node *rnp; @@ -734,21 +793,30 @@ void show_rcu_gp_kthreads(void) j = jiffies; ja = j - data_race(rcu_state.gp_activity); jr = j - data_race(rcu_state.gp_req_activity); + js = j - data_race(rcu_state.gp_start); jw = j - data_race(rcu_state.gp_wake_time); - pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n", + pr_info("%s: wait state: %s(%d) ->state: %#lx ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n", rcu_state.name, gp_state_getname(rcu_state.gp_state), - rcu_state.gp_state, t ? t->state : 0x1ffffL, - ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq), + rcu_state.gp_state, t ? t->__state : 0x1ffffL, t ? t->rt_priority : 0xffU, + js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq), (long)data_race(rcu_state.gp_seq), (long)data_race(rcu_get_root()->gp_seq_needed), + data_race(rcu_state.gp_max), data_race(rcu_state.gp_flags)); rcu_for_each_node_breadth_first(rnp) { - if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), - READ_ONCE(rnp->gp_seq_needed))) + if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) && + !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) && + !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks)) continue; - pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n", - rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq), - (long)data_race(rnp->gp_seq_needed)); + pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n", + rnp->grplo, rnp->grphi, + (long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed), + data_race(rnp->qsmask), + ".b"[!!data_race(rnp->boost_kthread_task)], + ".B"[!!data_race(rnp->boost_tasks)], + ".E"[!!data_race(rnp->exp_tasks)], + ".G"[!!data_race(rnp->gp_tasks)], + data_race(rnp->n_boosts)); if (!rcu_is_leaf_node(rnp)) continue; for_each_leaf_node_possible_cpu(rnp, cpu) { diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index b95ae86c40a7..c21b38cc25e9 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map); noinstr int notrace debug_lockdep_rcu_enabled(void) { - return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks && + return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) && current->lockdep_recursion == 0; } EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); @@ -524,6 +524,7 @@ static void test_callback(struct rcu_head *r) } DEFINE_STATIC_SRCU(early_srcu); +static unsigned long early_srcu_cookie; struct early_boot_kfree_rcu { struct rcu_head rh; @@ -536,8 +537,10 @@ static void early_boot_test_call_rcu(void) struct early_boot_kfree_rcu *rhp; call_rcu(&head, test_callback); - if (IS_ENABLED(CONFIG_SRCU)) + if (IS_ENABLED(CONFIG_SRCU)) { + early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu); call_srcu(&early_srcu, &shead, test_callback); + } rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); if (!WARN_ON_ONCE(!rhp)) kfree_rcu(rhp, rh); @@ -563,6 +566,7 @@ static int rcu_verify_early_boot_tests(void) if (IS_ENABLED(CONFIG_SRCU)) { early_boot_test_counter++; srcu_barrier(&early_srcu); + 
WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie)); } } if (rcu_self_test_counter != early_boot_test_counter) { diff --git a/kernel/reboot.c b/kernel/reboot.c index a6ad5eb2fa73..f7440c0c7e43 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) "reboot: " fmt +#include <linux/atomic.h> #include <linux/ctype.h> #include <linux/export.h> #include <linux/kexec.h> @@ -518,6 +519,84 @@ void orderly_reboot(void) } EXPORT_SYMBOL_GPL(orderly_reboot); +/** + * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay + * @work: work_struct associated with the emergency poweroff function + * + * This function is called in very critical situations to force + * a kernel poweroff after a configurable timeout value. + */ +static void hw_failure_emergency_poweroff_func(struct work_struct *work) +{ + /* + * We have reached here after the emergency shutdown waiting period has + * expired. This means orderly_poweroff has not been able to shut off + * the system for some reason. + * + * Try to shut down the system immediately using kernel_power_off + * if populated + */ + pr_emerg("Hardware protection timed-out. Trying forced poweroff\n"); + kernel_power_off(); + + /* + * Worst of the worst case trigger emergency restart + */ + pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); + emergency_restart(); +} + +static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, + hw_failure_emergency_poweroff_func); + +/** + * hw_failure_emergency_poweroff - Trigger an emergency system poweroff + * + * This may be called from any critical situation to trigger a system shutdown + * after a given period of time. If time is negative this is not scheduled. + */ +static void hw_failure_emergency_poweroff(int poweroff_delay_ms) +{ + if (poweroff_delay_ms <= 0) + return; + schedule_delayed_work(&hw_failure_emergency_poweroff_work, + msecs_to_jiffies(poweroff_delay_ms)); +} + +/** + * hw_protection_shutdown - Trigger an emergency system poweroff + * + * @reason: Reason of emergency shutdown to be printed. + * @ms_until_forced: Time to wait for orderly shutdown before tiggering a + * forced shudown. Negative value disables the forced + * shutdown. + * + * Initiate an emergency system shutdown in order to protect hardware from + * further damage. Usage examples include a thermal protection or a voltage or + * current regulator failures. + * NOTE: The request is ignored if protection shutdown is already pending even + * if the previous request has given a large timeout for forced shutdown. + * Can be called from any context. + */ +void hw_protection_shutdown(const char *reason, int ms_until_forced) +{ + static atomic_t allow_proceed = ATOMIC_INIT(1); + + pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason); + + /* Shutdown should be initiated only once. 
*/ + if (!atomic_dec_and_test(&allow_proceed)) + return; + + /* + * Queue a backup emergency shutdown in the event of + * orderly_poweroff failure + */ + hw_failure_emergency_poweroff(ms_until_forced); + orderly_poweroff(true); +} +EXPORT_SYMBOL_GPL(hw_protection_shutdown); + static int __init reboot_setup(char *str) { for (;;) { diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 5fc9c9b70862..978fcfca5871 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_MEMBARRIER) += membarrier.o obj-$(CONFIG_CPU_ISOLATION) += isolation.o obj-$(CONFIG_PSI) += psi.o +obj-$(CONFIG_SCHED_CORE) += core_sched.o diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5226cc26a095..cf16f8fda9a6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -84,6 +84,272 @@ unsigned int sysctl_sched_rt_period = 1000000; __read_mostly int scheduler_running; +#ifdef CONFIG_SCHED_CORE + +DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); + +/* kernel prio, less is more */ +static inline int __task_prio(struct task_struct *p) +{ + if (p->sched_class == &stop_sched_class) /* trumps deadline */ + return -2; + + if (rt_prio(p->prio)) /* includes deadline */ + return p->prio; /* [-1, 99] */ + + if (p->sched_class == &idle_sched_class) + return MAX_RT_PRIO + NICE_WIDTH; /* 140 */ + + return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */ +} + +/* + * l(a,b) + * le(a,b) := !l(b,a) + * g(a,b) := l(b,a) + * ge(a,b) := !l(a,b) + */ + +/* real prio, less is less */ +static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) +{ + + int pa = __task_prio(a), pb = __task_prio(b); + + if (-pa < -pb) + return true; + + if (-pb < -pa) + return false; + + if (pa == -1) /* dl_prio() doesn't work because of stop_class above */ + return !dl_time_before(a->dl.deadline, b->dl.deadline); + + if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */ + return cfs_prio_less(a, b, in_fi); + + return false; +} + +static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b) +{ + if (a->core_cookie < b->core_cookie) + return true; + + if (a->core_cookie > b->core_cookie) + return false; + + /* flip prio, so high prio is leftmost */ + if (prio_less(b, a, task_rq(a)->core->core_forceidle)) + return true; + + return false; +} + +#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node) + +static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b) +{ + return __sched_core_less(__node_2_sc(a), __node_2_sc(b)); +} + +static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node) +{ + const struct task_struct *p = __node_2_sc(node); + unsigned long cookie = (unsigned long)key; + + if (cookie < p->core_cookie) + return -1; + + if (cookie > p->core_cookie) + return 1; + + return 0; +} + +void sched_core_enqueue(struct rq *rq, struct task_struct *p) +{ + rq->core->core_task_seq++; + + if (!p->core_cookie) + return; + + rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); +} + +void sched_core_dequeue(struct rq *rq, struct task_struct *p) +{ + rq->core->core_task_seq++; + + if (!sched_core_enqueued(p)) + return; + + rb_erase(&p->core_node, &rq->core_tree); + RB_CLEAR_NODE(&p->core_node); +} + +/* + * Find left-most (aka, highest priority) task matching @cookie. 
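/*
 * Usage sketch (illustration only, not from the patch): how a driver
 * might call the hw_protection_shutdown() helper added to
 * kernel/reboot.c above, e.g. from a critical over-temperature or
 * regulator-fault handler. The interrupt handler and the 5 second
 * grace period are assumptions made for the example; the declaration
 * lives in <linux/reboot.h>.
 */
#include <linux/interrupt.h>
#include <linux/reboot.h>

static irqreturn_t example_overtemp_irq(int irq, void *data)
{
        /*
         * Ask for an orderly poweroff now; if it has not completed within
         * 5000 ms, the delayed work armed above forces poweroff/restart.
         */
        hw_protection_shutdown("Junction temperature critical", 5000);

        return IRQ_HANDLED;
}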
+ */ +static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie) +{ + struct rb_node *node; + + node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); + /* + * The idle task always matches any cookie! + */ + if (!node) + return idle_sched_class.pick_task(rq); + + return __node_2_sc(node); +} + +static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie) +{ + struct rb_node *node = &p->core_node; + + node = rb_next(node); + if (!node) + return NULL; + + p = container_of(node, struct task_struct, core_node); + if (p->core_cookie != cookie) + return NULL; + + return p; +} + +/* + * Magic required such that: + * + * raw_spin_rq_lock(rq); + * ... + * raw_spin_rq_unlock(rq); + * + * ends up locking and unlocking the _same_ lock, and all CPUs + * always agree on what rq has what lock. + * + * XXX entirely possible to selectively enable cores, don't bother for now. + */ + +static DEFINE_MUTEX(sched_core_mutex); +static atomic_t sched_core_count; +static struct cpumask sched_core_mask; + +static void __sched_core_flip(bool enabled) +{ + int cpu, t, i; + + cpus_read_lock(); + + /* + * Toggle the online cores, one by one. + */ + cpumask_copy(&sched_core_mask, cpu_online_mask); + for_each_cpu(cpu, &sched_core_mask) { + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + + i = 0; + local_irq_disable(); + for_each_cpu(t, smt_mask) { + /* supports up to SMT8 */ + raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); + } + + for_each_cpu(t, smt_mask) + cpu_rq(t)->core_enabled = enabled; + + for_each_cpu(t, smt_mask) + raw_spin_unlock(&cpu_rq(t)->__lock); + local_irq_enable(); + + cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask); + } + + /* + * Toggle the offline CPUs. + */ + cpumask_copy(&sched_core_mask, cpu_possible_mask); + cpumask_andnot(&sched_core_mask, &sched_core_mask, cpu_online_mask); + + for_each_cpu(cpu, &sched_core_mask) + cpu_rq(cpu)->core_enabled = enabled; + + cpus_read_unlock(); +} + +static void sched_core_assert_empty(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); +} + +static void __sched_core_enable(void) +{ + static_branch_enable(&__sched_core_enabled); + /* + * Ensure all previous instances of raw_spin_rq_*lock() have finished + * and future ones will observe !sched_core_disabled(). + */ + synchronize_rcu(); + __sched_core_flip(true); + sched_core_assert_empty(); +} + +static void __sched_core_disable(void) +{ + sched_core_assert_empty(); + __sched_core_flip(false); + static_branch_disable(&__sched_core_enabled); +} + +void sched_core_get(void) +{ + if (atomic_inc_not_zero(&sched_core_count)) + return; + + mutex_lock(&sched_core_mutex); + if (!atomic_read(&sched_core_count)) + __sched_core_enable(); + + smp_mb__before_atomic(); + atomic_inc(&sched_core_count); + mutex_unlock(&sched_core_mutex); +} + +static void __sched_core_put(struct work_struct *work) +{ + if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) { + __sched_core_disable(); + mutex_unlock(&sched_core_mutex); + } +} + +void sched_core_put(void) +{ + static DECLARE_WORK(_work, __sched_core_put); + + /* + * "There can be only one" + * + * Either this is the last one, or we don't actually need to do any + * 'work'. If it is the last *again*, we rely on + * WORK_STRUCT_PENDING_BIT. 
+ */ + if (!atomic_add_unless(&sched_core_count, -1, 1)) + schedule_work(&_work); +} + +#else /* !CONFIG_SCHED_CORE */ + +static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } +static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { } + +#endif /* CONFIG_SCHED_CORE */ + /* * part of the period that we allow rt tasks to run in us. * default: 0.95s @@ -184,6 +450,79 @@ int sysctl_sched_rt_runtime = 950000; * */ +void raw_spin_rq_lock_nested(struct rq *rq, int subclass) +{ + raw_spinlock_t *lock; + + /* Matches synchronize_rcu() in __sched_core_enable() */ + preempt_disable(); + if (sched_core_disabled()) { + raw_spin_lock_nested(&rq->__lock, subclass); + /* preempt_count *MUST* be > 1 */ + preempt_enable_no_resched(); + return; + } + + for (;;) { + lock = __rq_lockp(rq); + raw_spin_lock_nested(lock, subclass); + if (likely(lock == __rq_lockp(rq))) { + /* preempt_count *MUST* be > 1 */ + preempt_enable_no_resched(); + return; + } + raw_spin_unlock(lock); + } +} + +bool raw_spin_rq_trylock(struct rq *rq) +{ + raw_spinlock_t *lock; + bool ret; + + /* Matches synchronize_rcu() in __sched_core_enable() */ + preempt_disable(); + if (sched_core_disabled()) { + ret = raw_spin_trylock(&rq->__lock); + preempt_enable(); + return ret; + } + + for (;;) { + lock = __rq_lockp(rq); + ret = raw_spin_trylock(lock); + if (!ret || (likely(lock == __rq_lockp(rq)))) { + preempt_enable(); + return ret; + } + raw_spin_unlock(lock); + } +} + +void raw_spin_rq_unlock(struct rq *rq) +{ + raw_spin_unlock(rq_lockp(rq)); +} + +#ifdef CONFIG_SMP +/* + * double_rq_lock - safely lock two runqueues + */ +void double_rq_lock(struct rq *rq1, struct rq *rq2) +{ + lockdep_assert_irqs_disabled(); + + if (rq_order_less(rq2, rq1)) + swap(rq1, rq2); + + raw_spin_rq_lock(rq1); + if (__rq_lockp(rq1) == __rq_lockp(rq2)) + return; + + raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING); +} +#endif + /* * __task_rq_lock - lock the rq @p resides on. 
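/*
 * Pattern sketch (illustration only, not from the patch): the retry
 * loop in raw_spin_rq_lock_nested()/raw_spin_rq_trylock() above is the
 * "lock, then re-check that it is still the right lock" idiom used when
 * the lock protecting an object can itself be repointed (here: per-rq
 * lock vs. shared per-core lock). The structure below is invented to
 * show the idiom in isolation.
 */
#include <linux/spinlock.h>

struct example_obj {
        raw_spinlock_t *lock;           /* may be repointed at a shared lock */
};

static void example_obj_lock(struct example_obj *obj)
{
        raw_spinlock_t *lock;

        for (;;) {
                lock = READ_ONCE(obj->lock);
                raw_spin_lock(lock);
                /* Only the holder of the current lock may repoint it. */
                if (likely(lock == READ_ONCE(obj->lock)))
                        return;
                raw_spin_unlock(lock); /* raced with a repoint, retry */
        }
}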
*/ @@ -196,12 +535,12 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) for (;;) { rq = task_rq(p); - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { rq_pin_lock(rq, rf); return rq; } - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); while (unlikely(task_on_rq_migrating(p))) cpu_relax(); @@ -220,7 +559,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) for (;;) { raw_spin_lock_irqsave(&p->pi_lock, rf->flags); rq = task_rq(p); - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); /* * move_queued_task() task_rq_lock() * @@ -242,7 +581,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) rq_pin_lock(rq, rf); return rq; } - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); while (unlikely(task_on_rq_migrating(p))) @@ -312,7 +651,7 @@ void update_rq_clock(struct rq *rq) { s64 delta; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); if (rq->clock_update_flags & RQCF_ACT_SKIP) return; @@ -585,7 +924,6 @@ void wake_up_q(struct wake_q_head *head) struct task_struct *task; task = container_of(node, struct task_struct, wake_q); - BUG_ON(!task); /* Task can safely be re-inserted now: */ node = node->next; task->wake_q.next = NULL; @@ -611,7 +949,7 @@ void resched_curr(struct rq *rq) struct task_struct *curr = rq->curr; int cpu; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); if (test_tsk_need_resched(curr)) return; @@ -635,10 +973,10 @@ void resched_cpu(int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; - raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_rq_lock_irqsave(rq, flags); if (cpu_online(cpu) || cpu == smp_processor_id()) resched_curr(rq); - raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_rq_unlock_irqrestore(rq, flags); } #ifdef CONFIG_SMP @@ -1065,9 +1403,10 @@ static void uclamp_sync_util_min_rt_default(void) static inline struct uclamp_se uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) { + /* Copy by value as we could modify it */ struct uclamp_se uc_req = p->uclamp_req[clamp_id]; #ifdef CONFIG_UCLAMP_TASK_GROUP - struct uclamp_se uc_max; + unsigned int tg_min, tg_max, value; /* * Tasks in autogroups or root task group will be @@ -1078,9 +1417,11 @@ uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) if (task_group(p) == &root_task_group) return uc_req; - uc_max = task_group(p)->uclamp[clamp_id]; - if (uc_req.value > uc_max.value || !uc_req.user_defined) - return uc_max; + tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; + tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; + value = uc_req.value; + value = clamp(value, tg_min, tg_max); + uclamp_se_set(&uc_req, value, false); #endif return uc_req; @@ -1137,7 +1478,7 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, struct uclamp_se *uc_se = &p->uclamp[clamp_id]; struct uclamp_bucket *bucket; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); /* Update task effective clamp */ p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); @@ -1177,7 +1518,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, unsigned int bkt_clamp; unsigned int rq_clamp; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); /* * If sched_uclamp_used was enabled after task @p was enqueued, @@ -1279,8 +1620,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) } static inline void -uclamp_update_active(struct 
task_struct *p, enum uclamp_id clamp_id) +uclamp_update_active(struct task_struct *p) { + enum uclamp_id clamp_id; struct rq_flags rf; struct rq *rq; @@ -1300,9 +1642,11 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id) * affecting a valid clamp bucket, the next time it's enqueued, * it will already see the updated clamp bucket value. */ - if (p->uclamp[clamp_id].active) { - uclamp_rq_dec_id(rq, p, clamp_id); - uclamp_rq_inc_id(rq, p, clamp_id); + for_each_clamp_id(clamp_id) { + if (p->uclamp[clamp_id].active) { + uclamp_rq_dec_id(rq, p, clamp_id); + uclamp_rq_inc_id(rq, p, clamp_id); + } } task_rq_unlock(rq, p, &rf); @@ -1310,20 +1654,14 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id) #ifdef CONFIG_UCLAMP_TASK_GROUP static inline void -uclamp_update_active_tasks(struct cgroup_subsys_state *css, - unsigned int clamps) +uclamp_update_active_tasks(struct cgroup_subsys_state *css) { - enum uclamp_id clamp_id; struct css_task_iter it; struct task_struct *p; css_task_iter_start(css, 0, &it); - while ((p = css_task_iter_next(&it))) { - for_each_clamp_id(clamp_id) { - if ((0x1 << clamp_id) & clamps) - uclamp_update_active(p, clamp_id); - } - } + while ((p = css_task_iter_next(&it))) + uclamp_update_active(p); css_task_iter_end(&it); } @@ -1590,27 +1928,38 @@ static inline void uclamp_post_fork(struct task_struct *p) { } static inline void init_uclamp(void) { } #endif /* CONFIG_UCLAMP_TASK */ +bool sched_task_on_rq(struct task_struct *p) +{ + return task_on_rq_queued(p); +} + static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) { if (!(flags & ENQUEUE_NOCLOCK)) update_rq_clock(rq); if (!(flags & ENQUEUE_RESTORE)) { - sched_info_queued(rq, p); + sched_info_enqueue(rq, p); psi_enqueue(p, flags & ENQUEUE_WAKEUP); } uclamp_rq_inc(rq, p); p->sched_class->enqueue_task(rq, p, flags); + + if (sched_core_enabled(rq)) + sched_core_enqueue(rq, p); } static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) { + if (sched_core_enabled(rq)) + sched_core_dequeue(rq, p); + if (!(flags & DEQUEUE_NOCLOCK)) update_rq_clock(rq); if (!(flags & DEQUEUE_SAVE)) { - sched_info_dequeued(rq, p); + sched_info_dequeue(rq, p); psi_dequeue(p, flags & DEQUEUE_SLEEP); } @@ -1850,7 +2199,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int new_cpu) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); deactivate_task(rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, new_cpu); @@ -1916,7 +2265,6 @@ static int migration_cpu_stop(void *data) struct migration_arg *arg = data; struct set_affinity_pending *pending = arg->pending; struct task_struct *p = arg->task; - int dest_cpu = arg->dest_cpu; struct rq *rq = this_rq(); bool complete = false; struct rq_flags rf; @@ -1954,19 +2302,15 @@ static int migration_cpu_stop(void *data) if (pending) { p->migration_pending = NULL; complete = true; - } - if (dest_cpu < 0) { if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) goto out; - - dest_cpu = cpumask_any_distribute(&p->cpus_mask); } if (task_on_rq_queued(p)) - rq = __migrate_task(rq, &rf, p, dest_cpu); + rq = __migrate_task(rq, &rf, p, arg->dest_cpu); else - p->wake_cpu = dest_cpu; + p->wake_cpu = arg->dest_cpu; /* * XXX __migrate_task() can fail, at which point we might end @@ -2024,7 +2368,7 @@ int push_cpu_stop(void *arg) struct task_struct *p = arg; raw_spin_lock_irq(&p->pi_lock); - raw_spin_lock(&rq->lock); + 
raw_spin_rq_lock(rq); if (task_rq(p) != rq) goto out_unlock; @@ -2054,7 +2398,7 @@ int push_cpu_stop(void *arg) out_unlock: rq->push_busy = false; - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); raw_spin_unlock_irq(&p->pi_lock); put_task_struct(p); @@ -2107,7 +2451,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 * Because __kthread_bind() calls this on blocked tasks without * holding rq->lock. */ - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); } if (running) @@ -2249,7 +2593,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag init_completion(&my_pending.done); my_pending.arg = (struct migration_arg) { .task = p, - .dest_cpu = -1, /* any */ + .dest_cpu = dest_cpu, .pending = &my_pending, }; @@ -2257,6 +2601,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag } else { pending = p->migration_pending; refcount_inc(&pending->refs); + /* + * Affinity has changed, but we've already installed a + * pending. migration_cpu_stop() *must* see this, else + * we risk a completion of the pending despite having a + * task on a disallowed CPU. + * + * Serialized by p->pi_lock, so this is safe. + */ + pending->arg.dest_cpu = dest_cpu; } } pending = p->migration_pending; @@ -2277,7 +2630,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag return -EINVAL; } - if (task_running(rq, p) || p->state == TASK_WAKING) { + if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { /* * MIGRATE_ENABLE gets here because 'p == current', but for * anything else we cannot do is_migration_disabled(), punt @@ -2420,19 +2773,20 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { #ifdef CONFIG_SCHED_DEBUG + unsigned int state = READ_ONCE(p->__state); + /* * We should never call set_task_cpu() on a blocked task, * ttwu() will sort out the placement. */ - WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && - !p->on_rq); + WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); /* * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, * because schedstat_wait_{start,end} rebase migrating task's wait_start * time relying on p->on_rq. */ - WARN_ON_ONCE(p->state == TASK_RUNNING && + WARN_ON_ONCE(state == TASK_RUNNING && p->sched_class == &fair_sched_class && (p->on_rq && !task_on_rq_migrating(p))); @@ -2448,7 +2802,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) * task_rq_lock(). */ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || - lockdep_is_held(&task_rq(p)->lock))); + lockdep_is_held(__rq_lockp(task_rq(p))))); #endif /* * Clearly, migrating tasks to offline CPUs is a fairly daft thing. @@ -2604,7 +2958,7 @@ out: * smp_call_function() if an IPI is sent by the same process we are * waiting to become inactive. */ -unsigned long wait_task_inactive(struct task_struct *p, long match_state) +unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) { int running, queued; struct rq_flags rf; @@ -2632,7 +2986,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! 
*/ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state)) + if (match_state && unlikely(READ_ONCE(p->__state) != match_state)) return 0; cpu_relax(); } @@ -2647,7 +3001,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; - if (!match_state || p->state == match_state) + if (!match_state || READ_ONCE(p->__state) == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); @@ -2956,7 +3310,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf) { check_preempt_curr(rq, p, wake_flags); - p->state = TASK_RUNNING; + WRITE_ONCE(p->__state, TASK_RUNNING); trace_sched_wakeup(p); #ifdef CONFIG_SMP @@ -2979,6 +3333,9 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, if (rq->avg_idle > max) rq->avg_idle = max; + rq->wake_stamp = jiffies; + rq->wake_avg_idle = rq->avg_idle / 2; + rq->idle_stamp = 0; } #endif @@ -2990,7 +3347,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, { int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); if (p->sched_contributes_to_load) rq->nr_uninterruptible--; @@ -3345,12 +3702,12 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * - we're serialized against set_special_state() by virtue of * it disabling IRQs (this allows not taking ->pi_lock). */ - if (!(p->state & state)) + if (!(READ_ONCE(p->__state) & state)) goto out; success = 1; trace_sched_waking(p); - p->state = TASK_RUNNING; + WRITE_ONCE(p->__state, TASK_RUNNING); trace_sched_wakeup(p); goto out; } @@ -3363,7 +3720,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); - if (!(p->state & state)) + if (!(READ_ONCE(p->__state) & state)) goto unlock; trace_sched_waking(p); @@ -3429,7 +3786,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * TASK_WAKING such that we can unlock p->pi_lock before doing the * enqueue, such as ttwu_queue_wakelist(). */ - p->state = TASK_WAKING; + WRITE_ONCE(p->__state, TASK_WAKING); /* * If the owning (remote) CPU is still in the middle of schedule() with @@ -3522,7 +3879,7 @@ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct t ret = func(p, arg); rq_unlock(rq, &rf); } else { - switch (p->state) { + switch (READ_ONCE(p->__state)) { case TASK_RUNNING: case TASK_WAKING: break; @@ -3648,7 +4005,6 @@ int sysctl_numa_balancing(struct ctl_table *table, int write, #ifdef CONFIG_SCHEDSTATS DEFINE_STATIC_KEY_FALSE(sched_schedstats); -static bool __initdata __sched_schedstats = false; static void set_schedstats(bool enabled) { @@ -3672,16 +4028,11 @@ static int __init setup_schedstats(char *str) if (!str) goto out; - /* - * This code is called before jump labels have been set up, so we can't - * change the static branch directly just yet. Instead set a temporary - * variable so init_schedstats() can do it later. 
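/*
 * Conversion sketch (illustration only, not from the patch): throughout
 * this series task_struct::state becomes ::__state and is accessed via
 * READ_ONCE()/WRITE_ONCE() or helpers such as task_is_running(). A
 * hypothetical out-of-tree reader of ->state would be updated like so:
 */
#include <linux/sched.h>

static inline bool example_task_blocked(struct task_struct *p)
{
        /* was: return p->state != TASK_RUNNING; */
        return READ_ONCE(p->__state) != TASK_RUNNING;
        /* equivalently: return !task_is_running(p); */
}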
- */ if (!strcmp(str, "enable")) { - __sched_schedstats = true; + set_schedstats(true); ret = 1; } else if (!strcmp(str, "disable")) { - __sched_schedstats = false; + set_schedstats(false); ret = 1; } out: @@ -3692,11 +4043,6 @@ out: } __setup("schedstats=", setup_schedstats); -static void __init init_schedstats(void) -{ - set_schedstats(__sched_schedstats); -} - #ifdef CONFIG_PROC_SYSCTL int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) @@ -3718,8 +4064,6 @@ int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, return err; } #endif /* CONFIG_PROC_SYSCTL */ -#else /* !CONFIG_SCHEDSTATS */ -static inline void init_schedstats(void) {} #endif /* CONFIG_SCHEDSTATS */ /* @@ -3735,7 +4079,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) * nobody will actually run it, and a signal or other external * event cannot wake it up and insert it on the runqueue either. */ - p->state = TASK_NEW; + p->__state = TASK_NEW; /* * Make sure we do not leak PI boosting priority to the child. @@ -3841,7 +4185,7 @@ void wake_up_new_task(struct task_struct *p) struct rq *rq; raw_spin_lock_irqsave(&p->pi_lock, rf.flags); - p->state = TASK_RUNNING; + WRITE_ONCE(p->__state, TASK_RUNNING); #ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: @@ -4001,7 +4345,7 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head) void (*func)(struct rq *rq); struct callback_head *next; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); while (head) { func = (void (*)(struct rq *))head->func; @@ -4024,7 +4368,7 @@ static inline struct callback_head *splice_balance_callbacks(struct rq *rq) { struct callback_head *head = rq->balance_callback; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); if (head) rq->balance_callback = NULL; @@ -4041,9 +4385,9 @@ static inline void balance_callbacks(struct rq *rq, struct callback_head *head) unsigned long flags; if (unlikely(head)) { - raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_rq_lock_irqsave(rq, flags); do_balance_callbacks(rq, head); - raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_rq_unlock_irqrestore(rq, flags); } } @@ -4074,10 +4418,10 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf * do an early lockdep release here: */ rq_unpin_lock(rq, rf); - spin_release(&rq->lock.dep_map, _THIS_IP_); + spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ - rq->lock.owner = next; + rq_lockp(rq)->owner = next; #endif } @@ -4088,9 +4432,9 @@ static inline void finish_lock_switch(struct rq *rq) * fix up the runqueue lock - which gets 'carried over' from * prev into current: */ - spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); + spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); __balance_callbacks(rq); - raw_spin_unlock_irq(&rq->lock); + raw_spin_rq_unlock_irq(rq); } /* @@ -4203,10 +4547,11 @@ static struct rq *finish_task_switch(struct task_struct *prev) * running on another CPU and we could rave with its RUNNING -> DEAD * transition, resulting in a double drop. 
*/ - prev_state = prev->state; + prev_state = READ_ONCE(prev->__state); vtime_task_switch(prev); perf_event_task_sched_in(prev, current); finish_task(prev); + tick_nohz_task_switch(); finish_lock_switch(rq); finish_arch_post_lock_switch(); kcov_finish_switch(current); @@ -4252,7 +4597,6 @@ static struct rq *finish_task_switch(struct task_struct *prev) put_task_struct_rcu_user(prev); } - tick_nohz_task_switch(); return rq; } @@ -4348,9 +4692,9 @@ context_switch(struct rq *rq, struct task_struct *prev, * externally visible scheduler statistics: current number of runnable * threads, total number of context switches performed since bootup. */ -unsigned long nr_running(void) +unsigned int nr_running(void) { - unsigned long i, sum = 0; + unsigned int i, sum = 0; for_each_online_cpu(i) sum += cpu_rq(i)->nr_running; @@ -4395,7 +4739,7 @@ unsigned long long nr_context_switches(void) * it does become runnable. */ -unsigned long nr_iowait_cpu(int cpu) +unsigned int nr_iowait_cpu(int cpu) { return atomic_read(&cpu_rq(cpu)->nr_iowait); } @@ -4430,9 +4774,9 @@ unsigned long nr_iowait_cpu(int cpu) * Task CPU affinities can make all that even more 'interesting'. */ -unsigned long nr_iowait(void) +unsigned int nr_iowait(void) { - unsigned long i, sum = 0; + unsigned int i, sum = 0; for_each_possible_cpu(i) sum += nr_iowait_cpu(i); @@ -4897,7 +5241,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt) #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - if (!preempt && prev->state && prev->non_block_count) { + if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", prev->comm, prev->pid, prev->non_block_count); dump_stack(); @@ -4943,7 +5287,7 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, * Pick up the highest-prio task: */ static inline struct task_struct * -pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { const struct sched_class *class; struct task_struct *p; @@ -4961,7 +5305,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (unlikely(p == RETRY_TASK)) goto restart; - /* Assumes fair_sched_class->next == idle_sched_class */ + /* Assume the next prioritized class is idle_sched_class */ if (!p) { put_prev_task(rq, prev); p = pick_next_task_idle(rq); @@ -4983,6 +5327,455 @@ restart: BUG(); } +#ifdef CONFIG_SCHED_CORE +static inline bool is_task_rq_idle(struct task_struct *t) +{ + return (task_rq(t)->idle == t); +} + +static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) +{ + return is_task_rq_idle(a) || (a->core_cookie == cookie); +} + +static inline bool cookie_match(struct task_struct *a, struct task_struct *b) +{ + if (is_task_rq_idle(a) || is_task_rq_idle(b)) + return true; + + return a->core_cookie == b->core_cookie; +} + +// XXX fairness/fwd progress conditions +/* + * Returns + * - NULL if there is no runnable task for this class. + * - the highest priority task for this runqueue if it matches + * rq->core->core_cookie or its priority is greater than max. + * - Else returns idle_task. 
+ */ +static struct task_struct * +pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max, bool in_fi) +{ + struct task_struct *class_pick, *cookie_pick; + unsigned long cookie = rq->core->core_cookie; + + class_pick = class->pick_task(rq); + if (!class_pick) + return NULL; + + if (!cookie) { + /* + * If class_pick is tagged, return it only if it has + * higher priority than max. + */ + if (max && class_pick->core_cookie && + prio_less(class_pick, max, in_fi)) + return idle_sched_class.pick_task(rq); + + return class_pick; + } + + /* + * If class_pick is idle or matches cookie, return early. + */ + if (cookie_equals(class_pick, cookie)) + return class_pick; + + cookie_pick = sched_core_find(rq, cookie); + + /* + * If class > max && class > cookie, it is the highest priority task on + * the core (so far) and it must be selected, otherwise we must go with + * the cookie pick in order to satisfy the constraint. + */ + if (prio_less(cookie_pick, class_pick, in_fi) && + (!max || prio_less(max, class_pick, in_fi))) + return class_pick; + + return cookie_pick; +} + +extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); + +static struct task_struct * +pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +{ + struct task_struct *next, *max = NULL; + const struct sched_class *class; + const struct cpumask *smt_mask; + bool fi_before = false; + int i, j, cpu, occ = 0; + bool need_sync; + + if (!sched_core_enabled(rq)) + return __pick_next_task(rq, prev, rf); + + cpu = cpu_of(rq); + + /* Stopper task is switching into idle, no need core-wide selection. */ + if (cpu_is_offline(cpu)) { + /* + * Reset core_pick so that we don't enter the fastpath when + * coming online. core_pick would already be migrated to + * another cpu during offline. + */ + rq->core_pick = NULL; + return __pick_next_task(rq, prev, rf); + } + + /* + * If there were no {en,de}queues since we picked (IOW, the task + * pointers are all still valid), and we haven't scheduled the last + * pick yet, do so now. + * + * rq->core_pick can be NULL if no selection was made for a CPU because + * it was either offline or went offline during a sibling's core-wide + * selection. In this case, do a core-wide selection. + */ + if (rq->core->core_pick_seq == rq->core->core_task_seq && + rq->core->core_pick_seq != rq->core_sched_seq && + rq->core_pick) { + WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); + + next = rq->core_pick; + if (next != prev) { + put_prev_task(rq, prev); + set_next_task(rq, next); + } + + rq->core_pick = NULL; + return next; + } + + put_prev_task_balance(rq, prev, rf); + + smt_mask = cpu_smt_mask(cpu); + need_sync = !!rq->core->core_cookie; + + /* reset state */ + rq->core->core_cookie = 0UL; + if (rq->core->core_forceidle) { + need_sync = true; + fi_before = true; + rq->core->core_forceidle = false; + } + + /* + * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq + * + * @task_seq guards the task state ({en,de}queues) + * @pick_seq is the @task_seq we did a selection on + * @sched_seq is the @pick_seq we scheduled + * + * However, preemptions can cause multiple picks on the same task set. + * 'Fix' this by also increasing @task_seq for every pick. + */ + rq->core->core_task_seq++; + + /* + * Optimize for common case where this CPU has no cookies + * and there are no cookied tasks running on siblings. 
+ */ + if (!need_sync) { + for_each_class(class) { + next = class->pick_task(rq); + if (next) + break; + } + + if (!next->core_cookie) { + rq->core_pick = NULL; + /* + * For robustness, update the min_vruntime_fi for + * unconstrained picks as well. + */ + WARN_ON_ONCE(fi_before); + task_vruntime_update(rq, next, false); + goto done; + } + } + + for_each_cpu(i, smt_mask) { + struct rq *rq_i = cpu_rq(i); + + rq_i->core_pick = NULL; + + if (i != cpu) + update_rq_clock(rq_i); + } + + /* + * Try and select tasks for each sibling in descending sched_class + * order. + */ + for_each_class(class) { +again: + for_each_cpu_wrap(i, smt_mask, cpu) { + struct rq *rq_i = cpu_rq(i); + struct task_struct *p; + + if (rq_i->core_pick) + continue; + + /* + * If this sibling doesn't yet have a suitable task to + * run; ask for the most eligible task, given the + * highest priority task already selected for this + * core. + */ + p = pick_task(rq_i, class, max, fi_before); + if (!p) + continue; + + if (!is_task_rq_idle(p)) + occ++; + + rq_i->core_pick = p; + if (rq_i->idle == p && rq_i->nr_running) { + rq->core->core_forceidle = true; + if (!fi_before) + rq->core->core_forceidle_seq++; + } + + /* + * If this new candidate is of higher priority than the + * previous; and they're incompatible; we need to wipe + * the slate and start over. pick_task makes sure that + * p's priority is more than max if it doesn't match + * max's cookie. + * + * NOTE: this is a linear max-filter and is thus bounded + * in execution time. + */ + if (!max || !cookie_match(max, p)) { + struct task_struct *old_max = max; + + rq->core->core_cookie = p->core_cookie; + max = p; + + if (old_max) { + rq->core->core_forceidle = false; + for_each_cpu(j, smt_mask) { + if (j == i) + continue; + + cpu_rq(j)->core_pick = NULL; + } + occ = 1; + goto again; + } + } + } + } + + rq->core->core_pick_seq = rq->core->core_task_seq; + next = rq->core_pick; + rq->core_sched_seq = rq->core->core_pick_seq; + + /* Something should have been selected for current CPU */ + WARN_ON_ONCE(!next); + + /* + * Reschedule siblings + * + * NOTE: L1TF -- at this point we're no longer running the old task and + * sending an IPI (below) ensures the sibling will no longer be running + * their task. This ensures there is no inter-sibling overlap between + * non-matching user state. + */ + for_each_cpu(i, smt_mask) { + struct rq *rq_i = cpu_rq(i); + + /* + * An online sibling might have gone offline before a task + * could be picked for it, or it might be offline but later + * happen to come online, but its too late and nothing was + * picked for it. That's Ok - it will pick tasks for itself, + * so ignore it. + */ + if (!rq_i->core_pick) + continue; + + /* + * Update for new !FI->FI transitions, or if continuing to be in !FI: + * fi_before fi update? + * 0 0 1 + * 0 1 1 + * 1 0 1 + * 1 1 0 + */ + if (!(fi_before && rq->core->core_forceidle)) + task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle); + + rq_i->core_pick->core_occupation = occ; + + if (i == cpu) { + rq_i->core_pick = NULL; + continue; + } + + /* Did we break L1TF mitigation requirements? 
*/ + WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); + + if (rq_i->curr == rq_i->core_pick) { + rq_i->core_pick = NULL; + continue; + } + + resched_curr(rq_i); + } + +done: + set_next_task(rq, next); + return next; +} + +static bool try_steal_cookie(int this, int that) +{ + struct rq *dst = cpu_rq(this), *src = cpu_rq(that); + struct task_struct *p; + unsigned long cookie; + bool success = false; + + local_irq_disable(); + double_rq_lock(dst, src); + + cookie = dst->core->core_cookie; + if (!cookie) + goto unlock; + + if (dst->curr != dst->idle) + goto unlock; + + p = sched_core_find(src, cookie); + if (p == src->idle) + goto unlock; + + do { + if (p == src->core_pick || p == src->curr) + goto next; + + if (!cpumask_test_cpu(this, &p->cpus_mask)) + goto next; + + if (p->core_occupation > dst->idle->core_occupation) + goto next; + + p->on_rq = TASK_ON_RQ_MIGRATING; + deactivate_task(src, p, 0); + set_task_cpu(p, this); + activate_task(dst, p, 0); + p->on_rq = TASK_ON_RQ_QUEUED; + + resched_curr(dst); + + success = true; + break; + +next: + p = sched_core_next(p, cookie); + } while (p); + +unlock: + double_rq_unlock(dst, src); + local_irq_enable(); + + return success; +} + +static bool steal_cookie_task(int cpu, struct sched_domain *sd) +{ + int i; + + for_each_cpu_wrap(i, sched_domain_span(sd), cpu) { + if (i == cpu) + continue; + + if (need_resched()) + break; + + if (try_steal_cookie(cpu, i)) + return true; + } + + return false; +} + +static void sched_core_balance(struct rq *rq) +{ + struct sched_domain *sd; + int cpu = cpu_of(rq); + + preempt_disable(); + rcu_read_lock(); + raw_spin_rq_unlock_irq(rq); + for_each_domain(cpu, sd) { + if (need_resched()) + break; + + if (steal_cookie_task(cpu, sd)) + break; + } + raw_spin_rq_lock_irq(rq); + rcu_read_unlock(); + preempt_enable(); +} + +static DEFINE_PER_CPU(struct callback_head, core_balance_head); + +void queue_core_balance(struct rq *rq) +{ + if (!sched_core_enabled(rq)) + return; + + if (!rq->core->core_cookie) + return; + + if (!rq->nr_running) /* not forced idle */ + return; + + queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); +} + +static inline void sched_core_cpu_starting(unsigned int cpu) +{ + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + struct rq *rq, *core_rq = NULL; + int i; + + core_rq = cpu_rq(cpu)->core; + + if (!core_rq) { + for_each_cpu(i, smt_mask) { + rq = cpu_rq(i); + if (rq->core && rq->core == rq) + core_rq = rq; + } + + if (!core_rq) + core_rq = cpu_rq(cpu); + + for_each_cpu(i, smt_mask) { + rq = cpu_rq(i); + + WARN_ON_ONCE(rq->core && rq->core != core_rq); + rq->core = core_rq; + } + } +} +#else /* !CONFIG_SCHED_CORE */ + +static inline void sched_core_cpu_starting(unsigned int cpu) {} + +static struct task_struct * +pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +{ + return __pick_next_task(rq, prev, rf); +} + +#endif /* CONFIG_SCHED_CORE */ + /* * __schedule() is the main scheduler function. * @@ -5074,10 +5867,10 @@ static void __sched notrace __schedule(bool preempt) * - we form a control dependency vs deactivate_task() below. * - ptrace_{,un}freeze_traced() can change ->state underneath us. 
*/ - prev_state = prev->state; + prev_state = READ_ONCE(prev->__state); if (!preempt && prev_state) { if (signal_pending_state(prev_state, prev)) { - prev->state = TASK_RUNNING; + WRITE_ONCE(prev->__state, TASK_RUNNING); } else { prev->sched_contributes_to_load = (prev_state & TASK_UNINTERRUPTIBLE) && @@ -5150,7 +5943,7 @@ static void __sched notrace __schedule(bool preempt) rq_unpin_lock(rq, &rf); __balance_callbacks(rq); - raw_spin_unlock_irq(&rq->lock); + raw_spin_rq_unlock_irq(rq); } } @@ -5174,7 +5967,7 @@ static inline void sched_submit_work(struct task_struct *tsk) { unsigned int task_flags; - if (!tsk->state) + if (task_is_running(tsk)) return; task_flags = tsk->flags; @@ -5249,7 +6042,7 @@ void __sched schedule_idle(void) * current task can be in any other state. Note, idle is always in the * TASK_RUNNING state. */ - WARN_ON_ONCE(current->state); + WARN_ON_ONCE(current->__state); do { __schedule(false); } while (need_resched()); @@ -5692,7 +6485,7 @@ out_unlock: rq_unpin_lock(rq, &rf); __balance_callbacks(rq); - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); preempt_enable(); } @@ -6389,7 +7182,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) { return __sched_setscheduler(p, attr, false, true); } -EXPORT_SYMBOL_GPL(sched_setattr_nocheck); /** * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. @@ -7149,7 +7941,7 @@ again: if (curr->sched_class != p->sched_class) goto out_unlock; - if (task_running(p_rq, p) || p->state) + if (task_running(p_rq, p) || !task_is_running(p)) goto out_unlock; yielded = curr->sched_class->yield_to_task(rq, p); @@ -7352,7 +8144,7 @@ void sched_show_task(struct task_struct *p) pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); - if (p->state == TASK_RUNNING) + if (task_is_running(p)) pr_cont(" running task "); #ifdef CONFIG_DEBUG_STACK_USAGE free = stack_not_used(p); @@ -7376,26 +8168,28 @@ EXPORT_SYMBOL_GPL(sched_show_task); static inline bool state_filter_match(unsigned long state_filter, struct task_struct *p) { + unsigned int state = READ_ONCE(p->__state); + /* no filter, everything matches */ if (!state_filter) return true; /* filter, but doesn't match */ - if (!(p->state & state_filter)) + if (!(state & state_filter)) return false; /* * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows * TASK_KILLABLE). */ - if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) + if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE) return false; return true; } -void show_state_filter(unsigned long state_filter) +void show_state_filter(unsigned int state_filter) { struct task_struct *g, *p; @@ -7434,19 +8228,32 @@ void show_state_filter(unsigned long state_filter) * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. */ -void init_idle(struct task_struct *idle, int cpu) +void __init init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; __sched_fork(0, idle); + /* + * The idle task doesn't need the kthread struct to function, but it + * is dressed up as a per-CPU kthread and thus needs to play the part + * if we want to avoid special-casing it in code that deals with per-CPU + * kthreads. 
+ */ + set_kthread_struct(idle); + raw_spin_lock_irqsave(&idle->pi_lock, flags); - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); - idle->state = TASK_RUNNING; + idle->__state = TASK_RUNNING; idle->se.exec_start = sched_clock(); - idle->flags |= PF_IDLE; + /* + * PF_KTHREAD should already be set at this point; regardless, make it + * look like a proper per-CPU kthread. + */ + idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; + kthread_set_per_cpu(idle, cpu); scs_task_reset(idle); kasan_unpoison_task_stack(idle); @@ -7480,7 +8287,7 @@ void init_idle(struct task_struct *idle, int cpu) #ifdef CONFIG_SMP idle->on_cpu = 1; #endif - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); raw_spin_unlock_irqrestore(&idle->pi_lock, flags); /* Set the preempt count _outside_ the spinlocks! */ @@ -7646,7 +8453,7 @@ static void balance_push(struct rq *rq) { struct task_struct *push_task = rq->curr; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); SCHED_WARN_ON(rq->cpu != smp_processor_id()); /* @@ -7663,12 +8470,8 @@ static void balance_push(struct rq *rq) /* * Both the cpu-hotplug and stop task are in this case and are * required to complete the hotplug process. - * - * XXX: the idle task does not match kthread_is_per_cpu() due to - * histerical raisins. */ - if (rq->idle == push_task || - kthread_is_per_cpu(push_task) || + if (kthread_is_per_cpu(push_task) || is_migration_disabled(push_task)) { /* @@ -7684,9 +8487,9 @@ static void balance_push(struct rq *rq) */ if (!rq->nr_running && !rq_has_pinned_tasks(rq) && rcuwait_active(&rq->hotplug_wait)) { - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); rcuwait_wake_up(&rq->hotplug_wait); - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); } return; } @@ -7696,7 +8499,7 @@ static void balance_push(struct rq *rq) * Temporarily drop rq->lock such that we can wake-up the stop task. * Both preemption and IRQs are still disabled. */ - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, this_cpu_ptr(&push_work)); /* @@ -7704,7 +8507,7 @@ static void balance_push(struct rq *rq) * schedule(). The next pick is obviously going to be the stop task * which kthread_is_per_cpu() and will push this task away. 
*/ - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); } static void balance_push_set(int cpu, bool on) @@ -7948,6 +8751,7 @@ static void sched_rq_cpu_starting(unsigned int cpu) int sched_cpu_starting(unsigned int cpu) { + sched_core_cpu_starting(cpu); sched_rq_cpu_starting(cpu); sched_tick_start(cpu); return 0; @@ -7994,7 +8798,7 @@ static void dump_rq_tasks(struct rq *rq, const char *loglvl) struct task_struct *g, *p; int cpu = cpu_of(rq); - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); for_each_process_thread(g, p) { @@ -8046,6 +8850,7 @@ void __init sched_init_smp(void) /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) BUG(); + current->flags &= ~PF_NO_SETAFFINITY; sched_init_granularity(); init_sched_rt_class(); @@ -8167,7 +8972,7 @@ void __init sched_init(void) struct rq *rq; rq = cpu_rq(i); - raw_spin_lock_init(&rq->lock); + raw_spin_lock_init(&rq->__lock); rq->nr_running = 0; rq->calc_load_active = 0; rq->calc_load_update = jiffies + LOAD_FREQ; @@ -8215,6 +9020,8 @@ void __init sched_init(void) rq->online = 0; rq->idle_stamp = 0; rq->avg_idle = 2*sysctl_sched_migration_cost; + rq->wake_stamp = jiffies; + rq->wake_avg_idle = rq->avg_idle; rq->max_idle_balance_cost = sysctl_sched_migration_cost; INIT_LIST_HEAD(&rq->cfs_tasks); @@ -8232,6 +9039,16 @@ void __init sched_init(void) #endif /* CONFIG_SMP */ hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); + +#ifdef CONFIG_SCHED_CORE + rq->core = NULL; + rq->core_pick = NULL; + rq->core_enabled = 0; + rq->core_tree = RB_ROOT; + rq->core_forceidle = false; + + rq->core_cookie = 0UL; +#endif } set_load_weight(&init_task, false); @@ -8258,8 +9075,6 @@ void __init sched_init(void) #endif init_sched_fair_class(); - init_schedstats(); - psi_init(); init_uclamp(); @@ -8277,15 +9092,15 @@ static inline int preempt_count_equals(int preempt_offset) void __might_sleep(const char *file, int line, int preempt_offset) { + unsigned int state = get_current_state(); /* * Blocking primitives will set (and therefore destroy) current->state, * since we will exit with TASK_RUNNING make sure we enter with it, * otherwise we will destroy state. */ - WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, + WARN_ONCE(state != TASK_RUNNING && current->task_state_change, "do not call blocking ops when !TASK_RUNNING; " - "state=%lx set at [<%p>] %pS\n", - current->state, + "state=%x set at [<%p>] %pS\n", state, (void *)current->task_state_change, (void *)current->task_state_change); @@ -8681,7 +9496,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) #ifdef CONFIG_UCLAMP_TASK_GROUP /* Propagate the effective uclamp value for the new group */ + mutex_lock(&uclamp_mutex); + rcu_read_lock(); cpu_util_update_eff(css); + rcu_read_unlock(); + mutex_unlock(&uclamp_mutex); #endif return 0; @@ -8742,7 +9561,7 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) * has happened. This would lead to problems with PELT, due to * move wanting to detach+attach while we're not attached yet. 
*/ - if (task->state == TASK_NEW) + if (READ_ONCE(task->__state) == TASK_NEW) ret = -EINVAL; raw_spin_unlock_irq(&task->pi_lock); @@ -8771,6 +9590,9 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css) enum uclamp_id clamp_id; unsigned int clamps; + lockdep_assert_held(&uclamp_mutex); + SCHED_WARN_ON(!rcu_read_lock_held()); + css_for_each_descendant_pre(css, top_css) { uc_parent = css_tg(css)->parent ? css_tg(css)->parent->uclamp : NULL; @@ -8803,7 +9625,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css) } /* Immediately update descendants RUNNABLE tasks */ - uclamp_update_active_tasks(css, clamps); + uclamp_update_active_tasks(css); } } @@ -8962,7 +9784,8 @@ static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); -static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) +static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, + u64 burst) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; @@ -8992,6 +9815,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (quota != RUNTIME_INF && quota > max_cfs_runtime) return -EINVAL; + if (quota != RUNTIME_INF && (burst > quota || + burst + quota > max_cfs_runtime)) + return -EINVAL; + /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). @@ -9013,6 +9840,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; + cfs_b->burst = burst; __refill_cfs_bandwidth_runtime(cfs_b); @@ -9046,9 +9874,10 @@ out_unlock: static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period; + u64 quota, period, burst; period = ktime_to_ns(tg->cfs_bandwidth.period); + burst = tg->cfs_bandwidth.burst; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -9056,7 +9885,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_quota(struct task_group *tg) @@ -9074,15 +9903,16 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period; + u64 quota, period, burst; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_period(struct task_group *tg) @@ -9095,6 +9925,30 @@ static long tg_get_cfs_period(struct task_group *tg) return cfs_period_us; } +static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) +{ + u64 quota, period, burst; + + if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + + return tg_set_cfs_bandwidth(tg, period, quota, burst); +} + +static long tg_get_cfs_burst(struct task_group *tg) +{ + u64 burst_us; + + burst_us = tg->cfs_bandwidth.burst; + do_div(burst_us, NSEC_PER_USEC); + + return burst_us; +} + static s64 
cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -9119,6 +9973,18 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } +static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_burst(css_tg(css)); +} + +static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 cfs_burst_us) +{ + return tg_set_cfs_burst(css_tg(css), cfs_burst_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -9272,6 +10138,11 @@ static struct cftype cpu_legacy_files[] = { .write_u64 = cpu_cfs_period_write_u64, }, { + .name = "cfs_burst_us", + .read_u64 = cpu_cfs_burst_read_u64, + .write_u64 = cpu_cfs_burst_write_u64, + }, + { .name = "stat", .seq_show = cpu_cfs_stat_show, }, @@ -9436,12 +10307,13 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, { struct task_group *tg = css_tg(of_css(of)); u64 period = tg_get_cfs_period(tg); + u64 burst = tg_get_cfs_burst(tg); u64 quota; int ret; ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst); return ret ?: nbytes; } #endif @@ -9468,6 +10340,12 @@ static struct cftype cpu_files[] = { .seq_show = cpu_max_show, .write = cpu_max_write, }, + { + .name = "max.burst", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = cpu_cfs_burst_read_u64, + .write_u64 = cpu_cfs_burst_write_u64, + }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP { diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c new file mode 100644 index 000000000000..9a80e9a474c0 --- /dev/null +++ b/kernel/sched/core_sched.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/prctl.h> +#include "sched.h" + +/* + * A simple wrapper around refcount. An allocated sched_core_cookie's + * address is used to compute the cookie of the task. + */ +struct sched_core_cookie { + refcount_t refcnt; +}; + +unsigned long sched_core_alloc_cookie(void) +{ + struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL); + if (!ck) + return 0; + + refcount_set(&ck->refcnt, 1); + sched_core_get(); + + return (unsigned long)ck; +} + +void sched_core_put_cookie(unsigned long cookie) +{ + struct sched_core_cookie *ptr = (void *)cookie; + + if (ptr && refcount_dec_and_test(&ptr->refcnt)) { + kfree(ptr); + sched_core_put(); + } +} + +unsigned long sched_core_get_cookie(unsigned long cookie) +{ + struct sched_core_cookie *ptr = (void *)cookie; + + if (ptr) + refcount_inc(&ptr->refcnt); + + return cookie; +} + +/* + * sched_core_update_cookie - replace the cookie on a task + * @p: the task to update + * @cookie: the new cookie + * + * Effectively exchange the task cookie; caller is responsible for lifetimes on + * both ends. + * + * Returns: the old cookie + */ +unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie) +{ + unsigned long old_cookie; + struct rq_flags rf; + struct rq *rq; + bool enqueued; + + rq = task_rq_lock(p, &rf); + + /* + * Since creating a cookie implies sched_core_get(), and we cannot set + * a cookie until after we've created it, similarly, we cannot destroy + * a cookie until after we've removed it, we must have core scheduling + * enabled here. 
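/*
 * Configuration sketch (illustration only, not from the patch): driving
 * the new CFS burst knobs wired up above from userspace. Assumes cgroup
 * v2 mounted at /sys/fs/cgroup and an existing "demo" group; on cgroup
 * v1 the equivalent file is cpu.cfs_burst_us. Values are microseconds,
 * and burst must not exceed quota, as enforced by tg_set_cfs_bandwidth().
 */
#include <stdio.h>

static int example_write(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        /* 50 ms of quota per 100 ms period, plus up to 25 ms of burst. */
        example_write("/sys/fs/cgroup/demo/cpu.max", "50000 100000");
        example_write("/sys/fs/cgroup/demo/cpu.max.burst", "25000");
        return 0;
}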
+ */ + SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq)); + + enqueued = sched_core_enqueued(p); + if (enqueued) + sched_core_dequeue(rq, p); + + old_cookie = p->core_cookie; + p->core_cookie = cookie; + + if (enqueued) + sched_core_enqueue(rq, p); + + /* + * If task is currently running, it may not be compatible anymore after + * the cookie change, so enter the scheduler on its CPU to schedule it + * away. + */ + if (task_running(rq, p)) + resched_curr(rq); + + task_rq_unlock(rq, p, &rf); + + return old_cookie; +} + +static unsigned long sched_core_clone_cookie(struct task_struct *p) +{ + unsigned long cookie, flags; + + raw_spin_lock_irqsave(&p->pi_lock, flags); + cookie = sched_core_get_cookie(p->core_cookie); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + return cookie; +} + +void sched_core_fork(struct task_struct *p) +{ + RB_CLEAR_NODE(&p->core_node); + p->core_cookie = sched_core_clone_cookie(current); +} + +void sched_core_free(struct task_struct *p) +{ + sched_core_put_cookie(p->core_cookie); +} + +static void __sched_core_set(struct task_struct *p, unsigned long cookie) +{ + cookie = sched_core_get_cookie(cookie); + cookie = sched_core_update_cookie(p, cookie); + sched_core_put_cookie(cookie); +} + +/* Called from prctl interface: PR_SCHED_CORE */ +int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, + unsigned long uaddr) +{ + unsigned long cookie = 0, id = 0; + struct task_struct *task, *p; + struct pid *grp; + int err = 0; + + if (!static_branch_likely(&sched_smt_present)) + return -ENODEV; + + if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 || + (cmd != PR_SCHED_CORE_GET && uaddr)) + return -EINVAL; + + rcu_read_lock(); + if (pid == 0) { + task = current; + } else { + task = find_task_by_vpid(pid); + if (!task) { + rcu_read_unlock(); + return -ESRCH; + } + } + get_task_struct(task); + rcu_read_unlock(); + + /* + * Check if this process has the right to modify the specified + * process. Use the regular "ptrace_may_access()" checks. + */ + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { + err = -EPERM; + goto out; + } + + switch (cmd) { + case PR_SCHED_CORE_GET: + if (type != PIDTYPE_PID || uaddr & 7) { + err = -EINVAL; + goto out; + } + cookie = sched_core_clone_cookie(task); + if (cookie) { + /* XXX improve ? 
*/ + ptr_to_hashval((void *)cookie, &id); + } + err = put_user(id, (u64 __user *)uaddr); + goto out; + + case PR_SCHED_CORE_CREATE: + cookie = sched_core_alloc_cookie(); + if (!cookie) { + err = -ENOMEM; + goto out; + } + break; + + case PR_SCHED_CORE_SHARE_TO: + cookie = sched_core_clone_cookie(current); + break; + + case PR_SCHED_CORE_SHARE_FROM: + if (type != PIDTYPE_PID) { + err = -EINVAL; + goto out; + } + cookie = sched_core_clone_cookie(task); + __sched_core_set(current, cookie); + goto out; + + default: + err = -EINVAL; + goto out; + }; + + if (type == PIDTYPE_PID) { + __sched_core_set(task, cookie); + goto out; + } + + read_lock(&tasklist_lock); + grp = task_pid_type(task, type); + + do_each_pid_thread(grp, type, p) { + if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) { + err = -EPERM; + goto out_tasklist; + } + } while_each_pid_thread(grp, type, p); + + do_each_pid_thread(grp, type, p) { + __sched_core_set(p, cookie); + } while_each_pid_thread(grp, type, p); +out_tasklist: + read_unlock(&tasklist_lock); + +out: + sched_core_put_cookie(cookie); + put_task_struct(task); + return err; +} + diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 104a1bade14f..893eece65bfd 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -112,7 +112,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, /* * Take rq->lock to make 64-bit read safe on 32-bit platforms. */ - raw_spin_lock_irq(&cpu_rq(cpu)->lock); + raw_spin_rq_lock_irq(cpu_rq(cpu)); #endif if (index == CPUACCT_STAT_NSTATS) { @@ -126,7 +126,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, } #ifndef CONFIG_64BIT - raw_spin_unlock_irq(&cpu_rq(cpu)->lock); + raw_spin_rq_unlock_irq(cpu_rq(cpu)); #endif return data; @@ -141,14 +141,14 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) /* * Take rq->lock to make 64-bit write safe on 32-bit platforms. */ - raw_spin_lock_irq(&cpu_rq(cpu)->lock); + raw_spin_rq_lock_irq(cpu_rq(cpu)); #endif for (i = 0; i < CPUACCT_STAT_NSTATS; i++) cpuusage->usages[i] = val; #ifndef CONFIG_64BIT - raw_spin_unlock_irq(&cpu_rq(cpu)->lock); + raw_spin_rq_unlock_irq(cpu_rq(cpu)); #endif } @@ -253,13 +253,13 @@ static int cpuacct_all_seq_show(struct seq_file *m, void *V) * Take rq->lock to make 64-bit read safe on 32-bit * platforms. */ - raw_spin_lock_irq(&cpu_rq(cpu)->lock); + raw_spin_rq_lock_irq(cpu_rq(cpu)); #endif seq_printf(m, " %llu", cpuusage->usages[index]); #ifndef CONFIG_64BIT - raw_spin_unlock_irq(&cpu_rq(cpu)->lock); + raw_spin_rq_unlock_irq(cpu_rq(cpu)); #endif } seq_puts(m, "\n"); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 4f09afd2f321..57124614363d 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -151,6 +151,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, unsigned int freq = arch_scale_freq_invariant() ? 
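/*
 * Userspace sketch (illustration only, not from the patch): driving the
 * PR_SCHED_CORE prctl() implemented in core_sched.c above. The request
 * values match this series' uapi additions and are repeated here only
 * in case the system's <sys/prctl.h> does not carry them yet. Scope 0
 * corresponds to PIDTYPE_PID, i.e. a single task; pid 0 means "current".
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE                   62
#define PR_SCHED_CORE_GET               0
#define PR_SCHED_CORE_CREATE            1
#define PR_SCHED_CORE_SHARE_TO          2
#define PR_SCHED_CORE_SHARE_FROM        3
#endif

int main(void)
{
        unsigned long long id = 0;

        /* Give the calling task its own core-scheduling cookie. */
        if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
                perror("PR_SCHED_CORE_CREATE");

        /* Read back the anonymized cookie id (single-PID scope, u64 dest). */
        if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0, 0, &id))
                perror("PR_SCHED_CORE_GET");

        printf("core sched cookie id: %#llx\n", id);
        return 0;
}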
policy->cpuinfo.max_freq : policy->cur; + util = map_util_perf(util); freq = map_util_freq(util, freq, max); if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 9a2989749b8d..aaacd6cfd42f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -157,7 +157,7 @@ void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) { u64 old = dl_rq->running_bw; - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); dl_rq->running_bw += dl_bw; SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); @@ -170,7 +170,7 @@ void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) { u64 old = dl_rq->running_bw; - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); dl_rq->running_bw -= dl_bw; SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ if (dl_rq->running_bw > old) @@ -184,7 +184,7 @@ void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) { u64 old = dl_rq->this_bw; - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); dl_rq->this_bw += dl_bw; SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ } @@ -194,7 +194,7 @@ void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) { u64 old = dl_rq->this_bw; - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); dl_rq->this_bw -= dl_bw; SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ if (dl_rq->this_bw > old) @@ -348,10 +348,10 @@ static void task_non_contending(struct task_struct *p) if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { if (dl_task(p)) sub_running_bw(dl_se, dl_rq); - if (!dl_task(p) || p->state == TASK_DEAD) { + if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); - if (p->state == TASK_DEAD) + if (READ_ONCE(p->__state) == TASK_DEAD) sub_rq_bw(&p->dl, &rq->dl); raw_spin_lock(&dl_b->lock); __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); @@ -987,7 +987,7 @@ static int start_dl_timer(struct task_struct *p) ktime_t now, act; s64 delta; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); /* * We want the timer to fire at the deadline, but considering @@ -1097,9 +1097,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) * If the runqueue is no longer available, migrate the * task elsewhere. This necessarily changes rq. 
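[The hunks in this file also convert reads of p->state into READ_ONCE(p->__state). Below is a small, hedged standalone model of that accessor pattern, using simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(); the toy_task structure is invented for illustration and the state constants mirror the kernel's values but are not taken from this diff.]

/* Hedged model: the owner publishes state with WRITE_ONCE(), remote
 * observers (timer callbacks, wakeups) sample it with READ_ONCE() and
 * act on that snapshot, as dl_task_timer() does above. */
#include <stdio.h>

#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))

#define TASK_RUNNING            0x0000
#define TASK_UNINTERRUPTIBLE    0x0002

struct toy_task {
        unsigned int __state;           /* only the owner writes this */
};

static void toy_set_state(struct toy_task *p, unsigned int state)
{
        WRITE_ONCE(p->__state, state);  /* paired with READ_ONCE() readers */
}

int main(void)
{
        struct toy_task t = { .__state = TASK_RUNNING };

        toy_set_state(&t, TASK_UNINTERRUPTIBLE);

        /* A remote observer samples the state once and acts on it. */
        if (READ_ONCE(t.__state) & TASK_UNINTERRUPTIBLE)
                printf("task is blocked\n");
        return 0;
}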
*/ - lockdep_unpin_lock(&rq->lock, rf.cookie); + lockdep_unpin_lock(__rq_lockp(rq), rf.cookie); rq = dl_task_offline_migration(rq, p); - rf.cookie = lockdep_pin_lock(&rq->lock); + rf.cookie = lockdep_pin_lock(__rq_lockp(rq)); update_rq_clock(rq); /* @@ -1355,10 +1355,10 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) sched_clock_tick(); update_rq_clock(rq); - if (!dl_task(p) || p->state == TASK_DEAD) { + if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); - if (p->state == TASK_DEAD && dl_se->dl_non_contending) { + if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) { sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); dl_se->dl_non_contending = 0; @@ -1722,7 +1722,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused { struct rq *rq; - if (p->state != TASK_WAKING) + if (READ_ONCE(p->__state) != TASK_WAKING) return; rq = task_rq(p); @@ -1731,7 +1731,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused * from try_to_wake_up(). Hence, p->pi_lock is locked, but * rq->lock is not... So, lock it */ - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); if (p->dl.dl_non_contending) { sub_running_bw(&p->dl, &rq->dl); p->dl.dl_non_contending = 0; @@ -1746,7 +1746,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused put_task_struct(p); } sub_rq_bw(&p->dl, &rq->dl); - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); } static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) @@ -1852,7 +1852,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, return rb_entry(left, struct sched_dl_entity, rb_node); } -static struct task_struct *pick_next_task_dl(struct rq *rq) +static struct task_struct *pick_task_dl(struct rq *rq) { struct sched_dl_entity *dl_se; struct dl_rq *dl_rq = &rq->dl; @@ -1864,7 +1864,18 @@ static struct task_struct *pick_next_task_dl(struct rq *rq) dl_se = pick_next_dl_entity(rq, dl_rq); BUG_ON(!dl_se); p = dl_task_of(dl_se); - set_next_task_dl(rq, p, true); + + return p; +} + +static struct task_struct *pick_next_task_dl(struct rq *rq) +{ + struct task_struct *p; + + p = pick_task_dl(rq); + if (p) + set_next_task_dl(rq, p, true); + return p; } @@ -2291,10 +2302,10 @@ skip: double_unlock_balance(this_rq, src_rq); if (push_task) { - raw_spin_unlock(&this_rq->lock); + raw_spin_rq_unlock(this_rq); stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, push_task, &src_rq->push_work); - raw_spin_lock(&this_rq->lock); + raw_spin_rq_lock(this_rq); } } @@ -2486,6 +2497,8 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) check_preempt_curr_dl(rq, p, 0); else resched_curr(rq); + } else { + update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); } } @@ -2539,6 +2552,7 @@ DEFINE_SCHED_CLASS(dl) = { #ifdef CONFIG_SMP .balance = balance_dl, + .pick_task = pick_task_dl, .select_task_rq = select_task_rq_dl, .migrate_task_rq = migrate_task_rq_dl, .set_cpus_allowed = set_cpus_allowed_dl, diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 9c882f20803e..0c5ec2776ddf 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -576,7 +576,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", SPLIT_NS(cfs_rq->exec_clock)); - raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_rq_lock_irqsave(rq, flags); if (rb_first_cached(&cfs_rq->tasks_timeline)) MIN_vruntime = 
(__pick_first_entity(cfs_rq))->vruntime; last = __pick_last_entity(cfs_rq); @@ -584,7 +584,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) max_vruntime = last->vruntime; min_vruntime = cfs_rq->min_vruntime; rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; - raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_rq_unlock_irqrestore(rq, flags); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", @@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = { #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F)) #define __P(F) __PS(#F, F) #define P(F) __PS(#F, p->F) +#define PM(F, M) __PS(#F, p->F & (M)) #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F))) #define __PN(F) __PSN(#F, F) #define PN(F) __PSN(#F, p->F) @@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, P(se.avg.util_avg); P(se.avg.last_update_time); P(se.avg.util_est.ewma); - P(se.avg.util_est.enqueued); + PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED); #endif #ifdef CONFIG_UCLAMP_TASK __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3248e24a90b0..fb469b26b00a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -268,33 +268,11 @@ const struct sched_class fair_sched_class; */ #ifdef CONFIG_FAIR_GROUP_SCHED -static inline struct task_struct *task_of(struct sched_entity *se) -{ - SCHED_WARN_ON(!entity_is_task(se)); - return container_of(se, struct task_struct, se); -} /* Walk up scheduling entities hierarchy */ #define for_each_sched_entity(se) \ for (; se; se = se->parent) -static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) -{ - return p->se.cfs_rq; -} - -/* runqueue on which this entity is (to be) queued */ -static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) -{ - return se->cfs_rq; -} - -/* runqueue "owned" by this group */ -static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) -{ - return grp->my_q; -} - static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) { if (!path) @@ -455,33 +433,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) #else /* !CONFIG_FAIR_GROUP_SCHED */ -static inline struct task_struct *task_of(struct sched_entity *se) -{ - return container_of(se, struct task_struct, se); -} - #define for_each_sched_entity(se) \ for (; se; se = NULL) -static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) -{ - return &task_rq(p)->cfs; -} - -static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) -{ - struct task_struct *p = task_of(se); - struct rq *rq = task_rq(p); - - return &rq->cfs; -} - -/* runqueue "owned" by this group */ -static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) -{ - return NULL; -} - static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) { if (path) @@ -1039,11 +993,14 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { struct task_struct *tsk = task_of(se); + unsigned int state; - if (tsk->state & TASK_INTERRUPTIBLE) + /* XXX racy against TTWU */ + state = READ_ONCE(tsk->__state); + if (state & TASK_INTERRUPTIBLE) __schedstat_set(se->statistics.sleep_start, rq_clock(rq_of(cfs_rq))); - if (tsk->state & TASK_UNINTERRUPTIBLE) + if (state & TASK_UNINTERRUPTIBLE) 
__schedstat_set(se->statistics.block_start, rq_clock(rq_of(cfs_rq))); } @@ -1107,7 +1064,7 @@ struct numa_group { static struct numa_group *deref_task_numa_group(struct task_struct *p) { return rcu_dereference_check(p->numa_group, p == current || - (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); + (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); } static struct numa_group *deref_curr_numa_group(struct task_struct *p) @@ -3139,7 +3096,7 @@ void reweight_task(struct task_struct *p, int prio) * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- (1) - * \Sum grq->load.weight + * \Sum grq->load.weight * * Now, because computing that sum is prohibitively expensive to compute (been * there, done that) we approximate it with this average stuff. The average @@ -3153,7 +3110,7 @@ void reweight_task(struct task_struct *p, int prio) * * tg->weight * grq->avg.load_avg * ge->load.weight = ------------------------------ (3) - * tg->load_avg + * tg->load_avg * * Where: tg->load_avg ~= \Sum grq->avg.load_avg * @@ -3169,7 +3126,7 @@ void reweight_task(struct task_struct *p, int prio) * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- = tg->weight (4) - * grp->load.weight + * grp->load.weight * * That is, the sum collapses because all other CPUs are idle; the UP scenario. * @@ -3188,7 +3145,7 @@ void reweight_task(struct task_struct *p, int prio) * * tg->weight * grq->load.weight * ge->load.weight = ----------------------------- (6) - * tg_load_avg' + * tg_load_avg' * * Where: * @@ -3298,6 +3255,61 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) #ifdef CONFIG_SMP #ifdef CONFIG_FAIR_GROUP_SCHED +/* + * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list + * immediately before a parent cfs_rq, and cfs_rqs are removed from the list + * bottom-up, we only have to test whether the cfs_rq before us on the list + * is our child. + * If cfs_rq is not on the list, test whether a child needs its to be added to + * connect a branch to the tree * (see list_add_leaf_cfs_rq() for details). + */ +static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) +{ + struct cfs_rq *prev_cfs_rq; + struct list_head *prev; + + if (cfs_rq->on_list) { + prev = cfs_rq->leaf_cfs_rq_list.prev; + } else { + struct rq *rq = rq_of(cfs_rq); + + prev = rq->tmp_alone_branch; + } + + prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); + + return (prev_cfs_rq->tg->parent == cfs_rq->tg); +} + +static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) +{ + if (cfs_rq->load.weight) + return false; + + if (cfs_rq->avg.load_sum) + return false; + + if (cfs_rq->avg.util_sum) + return false; + + if (cfs_rq->avg.runnable_sum) + return false; + + if (child_cfs_rq_on_list(cfs_rq)) + return false; + + /* + * _avg must be null when _sum are null because _avg = _sum / divider + * Make sure that rounding and/or propagation of PELT values never + * break this. 
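[The invariant stated in this comment follows from the PELT relation _avg = _sum / divider, with the divider coming from get_pelt_divider() (visible in the pelt.h hunk later in this diff). A hedged numeric sketch, with invented sample values, of why the sync paths below can simply recompute a _sum as _avg * divider:]

/* Hedged model: LOAD_AVG_MAX matches the kernel constant, everything
 * else is made up for illustration. */
#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX 47742

static uint32_t get_pelt_divider(uint32_t period_contrib)
{
        return LOAD_AVG_MAX - 1024 + period_contrib;
}

int main(void)
{
        uint32_t period_contrib = 600;          /* always < 1024 */
        uint32_t divider = get_pelt_divider(period_contrib);

        uint64_t util_sum = 800ull * divider;   /* i.e. util_avg == 800 */
        uint64_t util_avg = util_sum / divider;

        /* Remove a departed entity's contribution from the average ... */
        util_avg -= 300;
        /* ... and resynchronize the sum instead of subtracting an estimate. */
        util_sum = util_avg * divider;

        printf("divider=%u util_avg=%llu util_sum=%llu\n",
               (unsigned)divider, (unsigned long long)util_avg,
               (unsigned long long)util_sum);
        return 0;
}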
+ */ + SCHED_WARN_ON(cfs_rq->avg.load_avg || + cfs_rq->avg.util_avg || + cfs_rq->avg.runnable_avg); + + return true; +} + /** * update_tg_load_avg - update the tg's load avg * @cfs_rq: the cfs_rq whose avg changed @@ -3499,10 +3511,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf static inline void update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { - long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; + long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; unsigned long load_avg; u64 load_sum = 0; - s64 delta_sum; u32 divider; if (!runnable_sum) @@ -3549,13 +3560,16 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq load_sum = (s64)se_weight(se) * runnable_sum; load_avg = div_s64(load_sum, divider); - delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; - delta_avg = load_avg - se->avg.load_avg; - se->avg.load_sum = runnable_sum; + + delta = load_avg - se->avg.load_avg; + if (!delta) + return; + se->avg.load_avg = load_avg; - add_positive(&cfs_rq->avg.load_avg, delta_avg); - add_positive(&cfs_rq->avg.load_sum, delta_sum); + + add_positive(&cfs_rq->avg.load_avg, delta); + cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider; } static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) @@ -3671,15 +3685,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) r = removed_load; sub_positive(&sa->load_avg, r); - sub_positive(&sa->load_sum, r * divider); + sa->load_sum = sa->load_avg * divider; r = removed_util; sub_positive(&sa->util_avg, r); - sub_positive(&sa->util_sum, r * divider); + sa->util_sum = sa->util_avg * divider; r = removed_runnable; sub_positive(&sa->runnable_avg, r); - sub_positive(&sa->runnable_sum, r * divider); + sa->runnable_sum = sa->runnable_avg * divider; /* * removed_runnable is the unweighted version of removed_load so we @@ -3766,11 +3780,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s */ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { + /* + * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. + * See ___update_load_avg() for details. + */ + u32 divider = get_pelt_divider(&cfs_rq->avg); + dequeue_load_avg(cfs_rq, se); sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); - sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); + cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider; sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); - sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); + cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider; add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); @@ -3902,7 +3922,7 @@ static inline unsigned long _task_util_est(struct task_struct *p) { struct util_est ue = READ_ONCE(p->se.avg.util_est); - return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED); + return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED)); } static inline unsigned long task_util_est(struct task_struct *p) @@ -4002,7 +4022,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq, * Reset EWMA on utilization increases, the moving average is used only * to smooth utilization decreases. 
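[To make the EWMA described in this comment concrete, here is a hedged standalone model of the shift-based update performed by util_est_update(): decreases are smoothed with weight 1/4 (UTIL_EST_WEIGHT_SHIFT is 2 upstream), while with UTIL_EST_FASTUP increases reset the estimate immediately. The kernel's additional early-out checks are omitted and the sample values are invented.]

/* Hedged model of the util_est EWMA; trace values are illustrative. */
#include <stdio.h>

#define UTIL_EST_WEIGHT_SHIFT 2

static unsigned int ewma_update(unsigned int ewma, unsigned int enqueued)
{
        int diff = (int)enqueued - (int)ewma;

        if (diff > 0)                   /* fast-up: follow increases at once */
                return enqueued;

        /* ewma = 3/4 * ewma + 1/4 * enqueued, done with shifts */
        ewma <<= UTIL_EST_WEIGHT_SHIFT;
        ewma += diff;
        ewma >>= UTIL_EST_WEIGHT_SHIFT;
        return ewma;
}

int main(void)
{
        unsigned int ewma = 600;
        unsigned int samples[] = { 500, 400, 700, 300 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                ewma = ewma_update(ewma, samples[i]);
                printf("sample=%u ewma=%u\n", samples[i], ewma);
        }
        return 0;
}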
*/ - ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED); + ue.enqueued = task_util(p); if (sched_feat(UTIL_EST_FASTUP)) { if (ue.ewma < ue.enqueued) { ue.ewma = ue.enqueued; @@ -4051,6 +4071,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq, ue.ewma += last_ewma_diff; ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; done: + ue.enqueued |= UTIL_AVG_UNCHANGED; WRITE_ONCE(p->se.avg.util_est, ue); trace_sched_util_est_se_tp(&p->se); @@ -4085,6 +4106,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) #else /* CONFIG_SMP */ +static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) +{ + return true; +} + #define UPDATE_TG 0x0 #define SKIP_AGE_LOAD 0x0 #define DO_ATTACH 0x0 @@ -4419,6 +4445,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { + clear_buddies(cfs_rq, se); + /* 'current' is not kept within the tree. */ if (se->on_rq) { /* @@ -4478,7 +4506,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) * Avoid running the skip buddy, if running something else can * be done without getting too unfair. */ - if (cfs_rq->skip == se) { + if (cfs_rq->skip && cfs_rq->skip == se) { struct sched_entity *second; if (se == curr) { @@ -4505,8 +4533,6 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) se = cfs_rq->last; } - clear_buddies(cfs_rq, se); - return se; } @@ -4628,8 +4654,11 @@ static inline u64 sched_cfs_bandwidth_slice(void) */ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) { - if (cfs_b->quota != RUNTIME_INF) - cfs_b->runtime = cfs_b->quota; + if (unlikely(cfs_b->quota == RUNTIME_INF)) + return; + + cfs_b->runtime += cfs_b->quota; + cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -4743,8 +4772,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data) cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - cfs_rq->throttled_clock_task; - /* Add cfs_rq with already running entity in the list */ - if (cfs_rq->nr_running >= 1) + /* Add cfs_rq with load or one or more already running entities to the list */ + if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running) list_add_leaf_cfs_rq(cfs_rq); } @@ -4990,6 +5019,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; + /* Refill extra burst quota even if cfs_b->idle */ + __refill_cfs_bandwidth_runtime(cfs_b); + /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred @@ -4997,8 +5029,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u if (cfs_b->idle && !throttled) goto out_deactivate; - __refill_cfs_bandwidth_runtime(cfs_b); - if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; @@ -5248,6 +5278,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); cfs_b->quota *= 2; + cfs_b->burst *= 2; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -5279,6 +5310,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); + cfs_b->burst = 0; 
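[To illustrate the new cfs_b->burst field initialized here and the refill change earlier in this series of hunks, below is a hedged standalone model of __refill_cfs_bandwidth_runtime()'s accumulate-and-clamp behaviour: unused runtime carries over across periods, but never beyond quota + burst. All numbers are invented.]

/* Hedged model of burstable CFS bandwidth; units are arbitrary ns. */
#include <stdio.h>

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long long quota = 100000, burst = 50000, runtime = 0;
        unsigned long long usage[] = { 30000, 120000, 0, 140000 };

        for (int period = 0; period < 4; period++) {
                /* refill: top up by quota, clamp to quota + burst */
                runtime += quota;
                runtime = min_u64(runtime, quota + burst);

                /* consume this period's demand, bounded by what is left */
                unsigned long long used = min_u64(usage[period], runtime);
                runtime -= used;

                printf("period %d: ran %llu, %llu left over\n",
                       period, used, runtime);
        }
        return 0;
}

Setting burst to 0, as init_cfs_bandwidth() does here, clamps the refill back to quota and so preserves the old strict per-period behaviour.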
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); @@ -5328,7 +5360,7 @@ static void __maybe_unused update_runtime_enabled(struct rq *rq) { struct task_group *tg; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { @@ -5347,7 +5379,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) { struct task_group *tg; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { @@ -5935,11 +5967,15 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this /* Traverse only the allowed CPUs */ for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { + struct rq *rq = cpu_rq(i); + + if (!sched_core_cookie_match(rq, p)) + continue; + if (sched_idle_cpu(i)) return i; if (available_idle_cpu(i)) { - struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); if (idle && idle->exit_latency < min_exit_latency) { /* @@ -6025,9 +6061,10 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p return new_cpu; } -static inline int __select_idle_cpu(int cpu) +static inline int __select_idle_cpu(int cpu, struct task_struct *p) { - if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) + if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) && + sched_cpu_cookie_match(cpu_rq(cpu), p)) return cpu; return -1; @@ -6097,7 +6134,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu int cpu; if (!static_branch_likely(&sched_smt_present)) - return __select_idle_cpu(core); + return __select_idle_cpu(core, p); for_each_cpu(cpu, cpu_smt_mask(core)) { if (!available_idle_cpu(cpu)) { @@ -6153,7 +6190,7 @@ static inline bool test_idle_cores(int cpu, bool def) static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) { - return __select_idle_cpu(core); + return __select_idle_cpu(core, p); } static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) @@ -6172,9 +6209,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool { struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); int i, cpu, idle_cpu = -1, nr = INT_MAX; + struct rq *this_rq = this_rq(); int this = smp_processor_id(); struct sched_domain *this_sd; - u64 time; + u64 time = 0; this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); if (!this_sd) @@ -6184,12 +6222,21 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool if (sched_feat(SIS_PROP) && !has_idle_core) { u64 avg_cost, avg_idle, span_avg; + unsigned long now = jiffies; /* - * Due to large variance we need a large fuzz factor; - * hackbench in particularly is sensitive here. + * If we're busy, the assumption that the last idle period + * predicts the future is flawed; age away the remaining + * predicted idle time. 
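[The aging this comment describes is implemented by the loop that follows; as a hedged illustration, the standalone model below halves the remaining predicted idle time once per elapsed jiffy, so a runqueue that has stayed busy stops advertising a large idle window to the wakeup scan. Values are invented.]

/* Hedged model of the wake_avg_idle aging loop. */
#include <stdio.h>

int main(void)
{
        unsigned long wake_stamp = 1000;           /* jiffies at last update */
        unsigned long now = 1004;                  /* 4 jiffies later */
        unsigned long long wake_avg_idle = 80000;  /* predicted idle, in ns */

        while (wake_stamp < now && wake_avg_idle) {
                wake_stamp++;
                wake_avg_idle >>= 1;               /* halve per busy jiffy */
        }

        printf("aged wake_avg_idle = %llu ns\n", wake_avg_idle);   /* 5000 */
        return 0;
}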
*/ - avg_idle = this_rq()->avg_idle / 512; + if (unlikely(this_rq->wake_stamp < now)) { + while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) { + this_rq->wake_stamp++; + this_rq->wake_avg_idle >>= 1; + } + } + + avg_idle = this_rq->wake_avg_idle; avg_cost = this_sd->avg_scan_cost + 1; span_avg = sd->span_weight * avg_idle; @@ -6210,7 +6257,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool } else { if (!--nr) return -1; - idle_cpu = __select_idle_cpu(cpu); + idle_cpu = __select_idle_cpu(cpu, p); if ((unsigned int)idle_cpu < nr_cpumask_bits) break; } @@ -6221,6 +6268,13 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool if (sched_feat(SIS_PROP) && !has_idle_core) { time = cpu_clock(this) - time; + + /* + * Account for the scan cost of wakeups against the average + * idle time. + */ + this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time); + update_avg(&this_sd->avg_scan_cost, time); } @@ -6288,6 +6342,11 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) task_util = uclamp_task_util(p); } + /* + * per-cpu select_idle_mask usage + */ + lockdep_assert_irqs_disabled(); + if ((available_idle_cpu(target) || sched_idle_cpu(target)) && asym_fits_capacity(task_util, target)) return target; @@ -6563,8 +6622,11 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) struct cpumask *pd_mask = perf_domain_span(pd); unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask)); unsigned long max_util = 0, sum_util = 0; + unsigned long _cpu_cap = cpu_cap; int cpu; + _cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask)); + /* * The capacity state of CPUs of the current rd can be driven by CPUs * of another rd if they belong to the same pd. So, account for the @@ -6600,8 +6662,10 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) * is already enough to scale the EM reported power * consumption at the (eventually clamped) cpu_capacity. 
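[The clamping added in the next hunks can be summarized with a hedged standalone model: utilization fed to the Energy Model is capped at the capacity left after thermal pressure (_cpu_cap), for both the summed busy time and the peak used to pick a frequency, and that same cap is what gets passed to em_cpu_energy(). The numbers below are invented.]

/* Hedged model of the _cpu_cap clamp in compute_energy(). */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long cpu_cap = 1024, thermal_pressure = 224;
        unsigned long _cpu_cap = cpu_cap - thermal_pressure;    /* 800 */
        unsigned long cpu_util[] = { 300, 900, 750 };
        unsigned long sum_util = 0, max_util = 0;

        for (int cpu = 0; cpu < 3; cpu++) {
                unsigned long clamped = min_ul(cpu_util[cpu], _cpu_cap);

                sum_util += clamped;
                if (clamped > max_util)
                        max_util = clamped;
        }

        /* max_util, sum_util and _cpu_cap would feed em_cpu_energy(). */
        printf("sum_util=%lu max_util=%lu allowed_cap=%lu\n",
               sum_util, max_util, _cpu_cap);
        return 0;
}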
*/ - sum_util += effective_cpu_util(cpu, util_running, cpu_cap, - ENERGY_UTIL, NULL); + cpu_util = effective_cpu_util(cpu, util_running, cpu_cap, + ENERGY_UTIL, NULL); + + sum_util += min(cpu_util, _cpu_cap); /* * Performance domain frequency: utilization clamping @@ -6612,10 +6676,10 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) */ cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap, FREQUENCY_UTIL, tsk); - max_util = max(max_util, cpu_util); + max_util = max(max_util, min(cpu_util, _cpu_cap)); } - return em_cpu_energy(pd->em_pd, max_util, sum_util); + return em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap); } /* @@ -6661,15 +6725,15 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) { unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; struct root_domain *rd = cpu_rq(smp_processor_id())->rd; + int cpu, best_energy_cpu = prev_cpu, target = -1; unsigned long cpu_cap, util, base_energy = 0; - int cpu, best_energy_cpu = prev_cpu; struct sched_domain *sd; struct perf_domain *pd; rcu_read_lock(); pd = rcu_dereference(rd->pd); if (!pd || READ_ONCE(rd->overutilized)) - goto fail; + goto unlock; /* * Energy-aware wake-up happens on the lowest sched_domain starting @@ -6679,7 +6743,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) sd = sd->parent; if (!sd) - goto fail; + goto unlock; + + target = prev_cpu; sync_entity_load_avg(&p->se); if (!task_util_est(p)) @@ -6687,13 +6753,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) for (; pd; pd = pd->next) { unsigned long cur_delta, spare_cap, max_spare_cap = 0; + bool compute_prev_delta = false; unsigned long base_energy_pd; int max_spare_cap_cpu = -1; - /* Compute the 'base' energy of the pd, without @p */ - base_energy_pd = compute_energy(p, -1, pd); - base_energy += base_energy_pd; - for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { if (!cpumask_test_cpu(cpu, p->cpus_ptr)) continue; @@ -6714,26 +6777,40 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) if (!fits_capacity(util, cpu_cap)) continue; - /* Always use prev_cpu as a candidate. */ if (cpu == prev_cpu) { - prev_delta = compute_energy(p, prev_cpu, pd); - prev_delta -= base_energy_pd; - best_delta = min(best_delta, prev_delta); - } - - /* - * Find the CPU with the maximum spare capacity in - * the performance domain - */ - if (spare_cap > max_spare_cap) { + /* Always use prev_cpu as a candidate. */ + compute_prev_delta = true; + } else if (spare_cap > max_spare_cap) { + /* + * Find the CPU with the maximum spare capacity + * in the performance domain. + */ max_spare_cap = spare_cap; max_spare_cap_cpu = cpu; } } - /* Evaluate the energy impact of using this CPU. */ - if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) { + if (max_spare_cap_cpu < 0 && !compute_prev_delta) + continue; + + /* Compute the 'base' energy of the pd, without @p */ + base_energy_pd = compute_energy(p, -1, pd); + base_energy += base_energy_pd; + + /* Evaluate the energy impact of using prev_cpu. */ + if (compute_prev_delta) { + prev_delta = compute_energy(p, prev_cpu, pd); + if (prev_delta < base_energy_pd) + goto unlock; + prev_delta -= base_energy_pd; + best_delta = min(best_delta, prev_delta); + } + + /* Evaluate the energy impact of using max_spare_cap_cpu. 
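[For the energy deltas computed in the hunks around here, the final selection further down in find_energy_efficient_cpu() keeps prev_cpu unless the best candidate saves more than about 1/16 (~6%) of the energy attributed to prev_cpu. A hedged standalone model of that decision, with invented figures:]

/* Hedged model of the prev_cpu vs. best_energy_cpu choice. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned long base_energy = 10000;   /* pd energy without the task */
        unsigned long prev_delta  = 900;     /* extra energy on prev_cpu   */
        unsigned long best_delta  = 820;     /* extra energy on candidate  */
        int prev_cpu = 2, best_energy_cpu = 5, target;

        if (prev_delta == ULONG_MAX ||
            (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
                target = best_energy_cpu;
        else
                target = prev_cpu;           /* saving too small to migrate */

        printf("threshold=%lu saving=%lu -> CPU%d\n",
               (prev_delta + base_energy) >> 4, prev_delta - best_delta,
               target);
        return 0;
}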
*/ + if (max_spare_cap_cpu >= 0) { cur_delta = compute_energy(p, max_spare_cap_cpu, pd); + if (cur_delta < base_energy_pd) + goto unlock; cur_delta -= base_energy_pd; if (cur_delta < best_delta) { best_delta = cur_delta; @@ -6741,25 +6818,22 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) } } } -unlock: rcu_read_unlock(); /* * Pick the best CPU if prev_cpu cannot be used, or if it saves at * least 6% of the energy used by prev_cpu. */ - if (prev_delta == ULONG_MAX) - return best_energy_cpu; - - if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4)) - return best_energy_cpu; + if ((prev_delta == ULONG_MAX) || + (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4)) + target = best_energy_cpu; - return prev_cpu; + return target; -fail: +unlock: rcu_read_unlock(); - return -1; + return target; } /* @@ -6771,8 +6845,6 @@ fail: * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. * * Returns the target CPU number. - * - * preempt must be disabled. */ static int select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) @@ -6785,6 +6857,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) /* SD_flags and WF_flags share the first nibble */ int sd_flag = wake_flags & 0xF; + /* + * required for stable ->cpus_allowed + */ + lockdep_assert_held(&p->pi_lock); if (wake_flags & WF_TTWU) { record_wakee(p); @@ -6849,7 +6925,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) * min_vruntime -- the latter is done by enqueue_entity() when placing * the task on the new runqueue. */ - if (p->state == TASK_WAKING) { + if (READ_ONCE(p->__state) == TASK_WAKING) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 min_vruntime; @@ -6874,7 +6950,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' * rq->lock and can modify state directly. */ - lockdep_assert_held(&task_rq(p)->lock); + lockdep_assert_rq_held(task_rq(p)); detach_entity_cfs_rq(&p->se); } else { @@ -7078,6 +7154,39 @@ preempt: set_last_buddy(se); } +#ifdef CONFIG_SMP +static struct task_struct *pick_task_fair(struct rq *rq) +{ + struct sched_entity *se; + struct cfs_rq *cfs_rq; + +again: + cfs_rq = &rq->cfs; + if (!cfs_rq->nr_running) + return NULL; + + do { + struct sched_entity *curr = cfs_rq->curr; + + /* When we pick for a remote RQ, we'll not have done put_prev_entity() */ + if (curr) { + if (curr->on_rq) + update_curr(cfs_rq); + else + curr = NULL; + + if (unlikely(check_cfs_rq_runtime(cfs_rq))) + goto again; + } + + se = pick_next_entity(cfs_rq, curr); + cfs_rq = group_cfs_rq(se); + } while (cfs_rq); + + return task_of(se); +} +#endif + struct task_struct * pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { @@ -7501,7 +7610,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env) { s64 delta; - lockdep_assert_held(&env->src_rq->lock); + lockdep_assert_rq_held(env->src_rq); if (p->sched_class != &fair_sched_class) return 0; @@ -7523,6 +7632,14 @@ static int task_hot(struct task_struct *p, struct lb_env *env) if (sysctl_sched_migration_cost == -1) return 1; + + /* + * Don't migrate task if the task's cookie does not match + * with the destination CPU's core cookie. 
+ */ + if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) + return 1; + if (sysctl_sched_migration_cost == 0) return 0; @@ -7599,7 +7716,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) { int tsk_cache_hot; - lockdep_assert_held(&env->src_rq->lock); + lockdep_assert_rq_held(env->src_rq); /* * We do not migrate tasks that are: @@ -7688,7 +7805,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) */ static void detach_task(struct task_struct *p, struct lb_env *env) { - lockdep_assert_held(&env->src_rq->lock); + lockdep_assert_rq_held(env->src_rq); deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, env->dst_cpu); @@ -7704,7 +7821,7 @@ static struct task_struct *detach_one_task(struct lb_env *env) { struct task_struct *p; - lockdep_assert_held(&env->src_rq->lock); + lockdep_assert_rq_held(env->src_rq); list_for_each_entry_reverse(p, &env->src_rq->cfs_tasks, se.group_node) { @@ -7740,7 +7857,7 @@ static int detach_tasks(struct lb_env *env) struct task_struct *p; int detached = 0; - lockdep_assert_held(&env->src_rq->lock); + lockdep_assert_rq_held(env->src_rq); /* * Source run queue has been emptied by another CPU, clear @@ -7870,7 +7987,7 @@ next: */ static void attach_task(struct rq *rq, struct task_struct *p) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); BUG_ON(task_rq(p) != rq); activate_task(rq, p, ENQUEUE_NOCLOCK); @@ -7990,23 +8107,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done) #ifdef CONFIG_FAIR_GROUP_SCHED -static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) -{ - if (cfs_rq->load.weight) - return false; - - if (cfs_rq->avg.load_sum) - return false; - - if (cfs_rq->avg.util_sum) - return false; - - if (cfs_rq->avg.runnable_sum) - return false; - - return true; -} - static bool __update_blocked_fair(struct rq *rq, bool *done) { struct cfs_rq *cfs_rq, *pos; @@ -8030,7 +8130,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done) /* Propagate pending load changes to the parent, if any: */ se = cfs_rq->tg->se[cpu]; if (se && !skip_blocked_update(se)) - update_load_avg(cfs_rq_of(se), se, 0); + update_load_avg(cfs_rq_of(se), se, UPDATE_TG); /* * There can be a lot of idle CPU cgroups. Don't let fully @@ -8853,6 +8953,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) p->cpus_ptr)) continue; + /* Skip over this group if no cookie matched */ + if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) + continue; + local_group = cpumask_test_cpu(this_cpu, sched_group_span(group)); @@ -9781,7 +9885,7 @@ more_balance: if (need_active_balance(&env)) { unsigned long flags; - raw_spin_lock_irqsave(&busiest->lock, flags); + raw_spin_rq_lock_irqsave(busiest, flags); /* * Don't kick the active_load_balance_cpu_stop, @@ -9789,8 +9893,7 @@ more_balance: * moved to this_cpu: */ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { - raw_spin_unlock_irqrestore(&busiest->lock, - flags); + raw_spin_rq_unlock_irqrestore(busiest, flags); goto out_one_pinned; } @@ -9807,7 +9910,7 @@ more_balance: busiest->push_cpu = this_cpu; active_balance = 1; } - raw_spin_unlock_irqrestore(&busiest->lock, flags); + raw_spin_rq_unlock_irqrestore(busiest, flags); if (active_balance) { stop_one_cpu_nowait(cpu_of(busiest), @@ -10592,6 +10695,14 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) u64 curr_cost = 0; update_misfit_status(NULL, this_rq); + + /* + * There is a task waiting to run. No need to search for one. 
+ * Return 0; the task will be enqueued when switching to idle. + */ + if (this_rq->ttwu_pending) + return 0; + /* * We must set idle_stamp _before_ calling idle_balance(), such that we * measure the duration of idle_balance() as idle time. @@ -10624,7 +10735,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) goto out; } - raw_spin_unlock(&this_rq->lock); + raw_spin_rq_unlock(this_rq); update_blocked_averages(this_cpu); rcu_read_lock(); @@ -10657,12 +10768,13 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) * Stop searching for tasks to pull if there are * now runnable tasks on this rq. */ - if (pulled_task || this_rq->nr_running > 0) + if (pulled_task || this_rq->nr_running > 0 || + this_rq->ttwu_pending) break; } rcu_read_unlock(); - raw_spin_lock(&this_rq->lock); + raw_spin_rq_lock(this_rq); if (curr_cost > this_rq->max_idle_balance_cost) this_rq->max_idle_balance_cost = curr_cost; @@ -10755,6 +10867,119 @@ static void rq_offline_fair(struct rq *rq) #endif /* CONFIG_SMP */ +#ifdef CONFIG_SCHED_CORE +static inline bool +__entity_slice_used(struct sched_entity *se, int min_nr_tasks) +{ + u64 slice = sched_slice(cfs_rq_of(se), se); + u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; + + return (rtime * min_nr_tasks > slice); +} + +#define MIN_NR_TASKS_DURING_FORCEIDLE 2 +static inline void task_tick_core(struct rq *rq, struct task_struct *curr) +{ + if (!sched_core_enabled(rq)) + return; + + /* + * If runqueue has only one task which used up its slice and + * if the sibling is forced idle, then trigger schedule to + * give forced idle task a chance. + * + * sched_slice() considers only this active rq and it gets the + * whole slice. But during force idle, we have siblings acting + * like a single runqueue and hence we need to consider runnable + * tasks on this CPU and the forced idle CPU. Ideally, we should + * go through the forced idle rq, but that would be a perf hit. + * We can assume that the forced idle CPU has at least + * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check + * if we need to give up the CPU. + */ + if (rq->core->core_forceidle && rq->cfs.nr_running == 1 && + __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) + resched_curr(rq); +} + +/* + * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed. + */ +static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) +{ + for_each_sched_entity(se) { + struct cfs_rq *cfs_rq = cfs_rq_of(se); + + if (forceidle) { + if (cfs_rq->forceidle_seq == fi_seq) + break; + cfs_rq->forceidle_seq = fi_seq; + } + + cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; + } +} + +void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) +{ + struct sched_entity *se = &p->se; + + if (p->sched_class != &fair_sched_class) + return; + + se_fi_update(se, rq->core->core_forceidle_seq, in_fi); +} + +bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) +{ + struct rq *rq = task_rq(a); + struct sched_entity *sea = &a->se; + struct sched_entity *seb = &b->se; + struct cfs_rq *cfs_rqa; + struct cfs_rq *cfs_rqb; + s64 delta; + + SCHED_WARN_ON(task_rq(b)->core != rq->core); + +#ifdef CONFIG_FAIR_GROUP_SCHED + /* + * Find an se in the hierarchy for tasks a and b, such that the se's + * are immediate siblings. 
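[The loop that follows this comment equalizes the two entities' depths and then climbs both until they belong to the same group, at which point their vruntimes are comparable. As a hedged illustration, the standalone model below performs the same walk on an invented toy hierarchy.]

/* Hedged model of the common-ancestor walk in cfs_prio_less(). */
#include <stdio.h>

struct node {
        const char *name;
        struct node *parent;
        int depth;
};

static void find_matching(struct node **a, struct node **b)
{
        /* stop once both sit under the same parent, i.e. are siblings */
        while ((*a)->parent != (*b)->parent) {
                int da = (*a)->depth, db = (*b)->depth;

                if (da >= db)
                        *a = (*a)->parent;
                if (da <= db)
                        *b = (*b)->parent;
        }
}

int main(void)
{
        struct node root   = { "root",  NULL,   0 };
        struct node grp_a  = { "A",     &root,  1 };
        struct node grp_b  = { "B",     &root,  1 };
        struct node a_leaf = { "A/t1",  &grp_a, 2 };
        struct node b_leaf = { "B/t2",  &grp_b, 2 };

        struct node *a = &a_leaf, *b = &b_leaf;

        find_matching(&a, &b);
        printf("comparable at: %s vs %s\n", a->name, b->name);
        return 0;
}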
+ */ + while (sea->cfs_rq->tg != seb->cfs_rq->tg) { + int sea_depth = sea->depth; + int seb_depth = seb->depth; + + if (sea_depth >= seb_depth) + sea = parent_entity(sea); + if (sea_depth <= seb_depth) + seb = parent_entity(seb); + } + + se_fi_update(sea, rq->core->core_forceidle_seq, in_fi); + se_fi_update(seb, rq->core->core_forceidle_seq, in_fi); + + cfs_rqa = sea->cfs_rq; + cfs_rqb = seb->cfs_rq; +#else + cfs_rqa = &task_rq(a)->cfs; + cfs_rqb = &task_rq(b)->cfs; +#endif + + /* + * Find delta after normalizing se's vruntime with its cfs_rq's + * min_vruntime_fi, which would have been updated in prior calls + * to se_fi_update(). + */ + delta = (s64)(sea->vruntime - seb->vruntime) + + (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi); + + return delta > 0; +} +#else +static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} +#endif + /* * scheduler tick hitting a task of our scheduling class. * @@ -10778,6 +11003,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) update_misfit_status(curr, rq); update_overutilized_status(task_rq(curr)); + + task_tick_core(rq, curr); } /* @@ -10863,7 +11090,7 @@ static inline bool vruntime_normalized(struct task_struct *p) * waiting for actually being woken up by sched_ttwu_pending(). */ if (!se->sum_exec_runtime || - (p->state == TASK_WAKING && p->sched_remote_wakeup)) + (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup)) return true; return false; @@ -11149,9 +11376,9 @@ void unregister_fair_sched_group(struct task_group *tg) rq = cpu_rq(cpu); - raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_rq_lock_irqsave(rq, flags); list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); - raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_rq_unlock_irqrestore(rq, flags); } } @@ -11273,6 +11500,7 @@ DEFINE_SCHED_CLASS(fair) = { #ifdef CONFIG_SMP .balance = balance_fair, + .pick_task = pick_task_fair, .select_task_rq = select_task_rq_fair, .migrate_task_rq = migrate_task_rq_fair, diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 7ca3d3d86c2a..912b47aa99d8 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -437,8 +437,16 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir { update_idle_core(rq); schedstat_inc(rq->sched_goidle); + queue_core_balance(rq); } +#ifdef CONFIG_SMP +static struct task_struct *pick_task_idle(struct rq *rq) +{ + return rq->idle; +} +#endif + struct task_struct *pick_next_task_idle(struct rq *rq) { struct task_struct *next = rq->idle; @@ -455,10 +463,10 @@ struct task_struct *pick_next_task_idle(struct rq *rq) static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) { - raw_spin_unlock_irq(&rq->lock); + raw_spin_rq_unlock_irq(rq); printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); - raw_spin_lock_irq(&rq->lock); + raw_spin_rq_lock_irq(rq); } /* @@ -506,6 +514,7 @@ DEFINE_SCHED_CLASS(idle) = { #ifdef CONFIG_SMP .balance = balance_idle, + .pick_task = pick_task_idle, .select_task_rq = select_task_rq_idle, .set_cpus_allowed = set_cpus_allowed_common, #endif diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 5a6ea03f9882..7f06eaf12818 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -81,11 +81,9 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags) { cpumask_var_t non_housekeeping_mask; cpumask_var_t tmp; - int err; alloc_bootmem_cpumask_var(&non_housekeeping_mask); - err = cpulist_parse(str, non_housekeeping_mask); - if 
(err < 0 || cpumask_last(non_housekeeping_mask) >= nr_cpu_ids) { + if (cpulist_parse(str, non_housekeeping_mask) < 0) { pr_warn("Housekeeping: nohz_full= or isolcpus= incorrect CPU range\n"); free_bootmem_cpumask_var(non_housekeeping_mask); return 0; diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 1c79896f1bc0..954b229868d9 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -81,7 +81,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) long nr_active, delta = 0; nr_active = this_rq->nr_running - adjust; - nr_active += (long)this_rq->nr_uninterruptible; + nr_active += (int)this_rq->nr_uninterruptible; if (nr_active != this_rq->calc_load_active) { delta = nr_active - this_rq->calc_load_active; diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index 1462846d244e..e06071bf3472 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg) return LOAD_AVG_MAX - 1024 + avg->period_contrib; } -/* - * When a task is dequeued, its estimated utilization should not be update if - * its util_avg has not been updated at least once. - * This flag is used to synchronize util_avg updates with util_est updates. - * We map this information into the LSB bit of the utilization saved at - * dequeue time (i.e. util_est.dequeued). - */ -#define UTIL_AVG_UNCHANGED 0x1 - static inline void cfs_se_util_change(struct sched_avg *avg) { unsigned int enqueued; @@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg) if (!sched_feat(UTIL_EST)) return; - /* Avoid store if the flag has been already set */ + /* Avoid store if the flag has been already reset */ enqueued = avg->util_est.enqueued; if (!(enqueued & UTIL_AVG_UNCHANGED)) return; @@ -141,7 +132,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq) static inline u64 rq_clock_pelt(struct rq *rq) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); assert_clock_updated(rq); return rq->clock_pelt - rq->lost_idle_time; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index cc25a3cff41f..1652f2bb54b7 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -148,6 +148,7 @@ static int psi_bug __read_mostly; DEFINE_STATIC_KEY_FALSE(psi_disabled); +DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled); #ifdef CONFIG_PSI_DEFAULT_DISABLED static bool psi_enable; @@ -182,6 +183,8 @@ struct psi_group psi_system = { static void psi_avgs_work(struct work_struct *work); +static void poll_timer_fn(struct timer_list *t); + static void group_init(struct psi_group *group) { int cpu; @@ -201,6 +204,8 @@ static void group_init(struct psi_group *group) memset(group->polling_total, 0, sizeof(group->polling_total)); group->polling_next_update = ULLONG_MAX; group->polling_until = 0; + init_waitqueue_head(&group->poll_wait); + timer_setup(&group->poll_timer, poll_timer_fn, 0); rcu_assign_pointer(group->poll_task, NULL); } @@ -211,6 +216,9 @@ void __init psi_init(void) return; } + if (!cgroup_psi_enabled()) + static_branch_disable(&psi_cgroups_enabled); + psi_period = jiffies_to_nsecs(PSI_FREQ); group_init(&psi_system); } @@ -744,23 +752,23 @@ static void psi_group_change(struct psi_group *group, int cpu, static struct psi_group *iterate_groups(struct task_struct *task, void **iter) { + if (*iter == &psi_system) + return NULL; + #ifdef CONFIG_CGROUPS - struct cgroup *cgroup = NULL; + if (static_branch_likely(&psi_cgroups_enabled)) { + struct cgroup *cgroup = NULL; - if (!*iter) - cgroup = task->cgroups->dfl_cgrp; - else if 
(*iter == &psi_system) - return NULL; - else - cgroup = cgroup_parent(*iter); + if (!*iter) + cgroup = task->cgroups->dfl_cgrp; + else + cgroup = cgroup_parent(*iter); - if (cgroup && cgroup_parent(cgroup)) { - *iter = cgroup; - return cgroup_psi(cgroup); + if (cgroup && cgroup_parent(cgroup)) { + *iter = cgroup; + return cgroup_psi(cgroup); + } } -#else - if (*iter) - return NULL; #endif *iter = &psi_system; return &psi_system; @@ -1157,9 +1165,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, return ERR_CAST(task); } atomic_set(&group->poll_wakeup, 0); - init_waitqueue_head(&group->poll_wait); wake_up_process(task); - timer_setup(&group->poll_timer, poll_timer_fn, 0); rcu_assign_pointer(group->poll_task, task); } @@ -1211,6 +1217,7 @@ static void psi_trigger_destroy(struct kref *ref) group->poll_task, lockdep_is_held(&group->trigger_lock)); rcu_assign_pointer(group->poll_task, NULL); + del_timer(&group->poll_timer); } } @@ -1223,17 +1230,14 @@ static void psi_trigger_destroy(struct kref *ref) */ synchronize_rcu(); /* - * Destroy the kworker after releasing trigger_lock to prevent a + * Stop kthread 'psimon' after releasing trigger_lock to prevent a * deadlock while waiting for psi_poll_work to acquire trigger_lock */ if (task_to_destroy) { /* * After the RCU grace period has expired, the worker * can no longer be found through group->poll_task. - * But it might have been already scheduled before - * that - deschedule it cleanly before destroying it. */ - del_timer_sync(&group->poll_timer); kthread_stop(task_to_destroy); } kfree(t); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index c286e5ba3c94..3daf42a0f462 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -888,7 +888,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) if (skip) continue; - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); update_rq_clock(rq); if (rt_rq->rt_time) { @@ -926,7 +926,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) if (enqueue) sched_rt_rq_enqueue(rt_rq); - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); } if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) @@ -1626,7 +1626,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) return rt_task_of(rt_se); } -static struct task_struct *pick_next_task_rt(struct rq *rq) +static struct task_struct *pick_task_rt(struct rq *rq) { struct task_struct *p; @@ -1634,7 +1634,17 @@ static struct task_struct *pick_next_task_rt(struct rq *rq) return NULL; p = _pick_next_task_rt(rq); - set_next_task_rt(rq, p, true); + + return p; +} + +static struct task_struct *pick_next_task_rt(struct rq *rq) +{ + struct task_struct *p = pick_task_rt(rq); + + if (p) + set_next_task_rt(rq, p, true); + return p; } @@ -1894,10 +1904,10 @@ retry: */ push_task = get_push_task(rq); if (push_task) { - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); stop_one_cpu_nowait(rq->cpu, push_cpu_stop, push_task, &rq->push_work); - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); } return 0; @@ -2122,10 +2132,10 @@ void rto_push_irq_work_func(struct irq_work *work) * When it gets updated, a check is made if a push is possible. 
*/ if (has_pushable_tasks(rq)) { - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); while (push_rt_task(rq, true)) ; - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); } raw_spin_lock(&rd->rto_lock); @@ -2243,10 +2253,10 @@ skip: double_unlock_balance(this_rq, src_rq); if (push_task) { - raw_spin_unlock(&this_rq->lock); + raw_spin_rq_unlock(this_rq); stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, push_task, &src_rq->push_work); - raw_spin_lock(&this_rq->lock); + raw_spin_rq_lock(this_rq); } } @@ -2331,13 +2341,20 @@ void __init init_sched_rt_class(void) static void switched_to_rt(struct rq *rq, struct task_struct *p) { /* - * If we are already running, then there's nothing - * that needs to be done. But if we are not running - * we may need to preempt the current running task. - * If that current running task is also an RT task + * If we are running, update the avg_rt tracking, as the running time + * will now on be accounted into the latter. + */ + if (task_current(rq, p)) { + update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); + return; + } + + /* + * If we are not running we may need to preempt the current + * running task. If that current running task is also an RT task * then see if we can move to another run queue. */ - if (task_on_rq_queued(p) && rq->curr != p) { + if (task_on_rq_queued(p)) { #ifdef CONFIG_SMP if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) rt_queue_push_tasks(rq); @@ -2483,6 +2500,7 @@ DEFINE_SCHED_CLASS(rt) = { #ifdef CONFIG_SMP .balance = balance_rt, + .pick_task = pick_task_rt, .select_task_rq = select_task_rq_rt, .set_cpus_allowed = set_cpus_allowed_common, .rq_online = rq_online_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index a189bec13729..c80d42e9589b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -366,6 +366,7 @@ struct cfs_bandwidth { ktime_t period; u64 quota; u64 runtime; + u64 burst; s64 hierarchical_quota; u8 idle; @@ -526,6 +527,11 @@ struct cfs_rq { u64 exec_clock; u64 min_vruntime; +#ifdef CONFIG_SCHED_CORE + unsigned int forceidle_seq; + u64 min_vruntime_fi; +#endif + #ifndef CONFIG_64BIT u64 min_vruntime_copy; #endif @@ -631,8 +637,8 @@ struct rt_rq { } highest_prio; #endif #ifdef CONFIG_SMP - unsigned long rt_nr_migratory; - unsigned long rt_nr_total; + unsigned int rt_nr_migratory; + unsigned int rt_nr_total; int overloaded; struct plist_head pushable_tasks; @@ -646,7 +652,7 @@ struct rt_rq { raw_spinlock_t rt_runtime_lock; #ifdef CONFIG_RT_GROUP_SCHED - unsigned long rt_nr_boosted; + unsigned int rt_nr_boosted; struct rq *rq; struct task_group *tg; @@ -663,7 +669,7 @@ struct dl_rq { /* runqueue is an rbtree, ordered by deadline */ struct rb_root_cached root; - unsigned long dl_nr_running; + unsigned int dl_nr_running; #ifdef CONFIG_SMP /* @@ -677,7 +683,7 @@ struct dl_rq { u64 next; } earliest_dl; - unsigned long dl_nr_migratory; + unsigned int dl_nr_migratory; int overloaded; /* @@ -905,7 +911,7 @@ DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); */ struct rq { /* runqueue lock: */ - raw_spinlock_t lock; + raw_spinlock_t __lock; /* * nr_running and cpu_load should be in the same cacheline because @@ -955,7 +961,7 @@ struct rq { * one CPU and if it got migrated afterwards it may decrease * it on another CPU. 
Always updated under the runqueue lock: */ - unsigned long nr_uninterruptible; + unsigned int nr_uninterruptible; struct task_struct __rcu *curr; struct task_struct *idle; @@ -1017,6 +1023,9 @@ struct rq { u64 idle_stamp; u64 avg_idle; + unsigned long wake_stamp; + u64 wake_avg_idle; + /* This is used to determine avg_idle's max value */ u64 max_idle_balance_cost; @@ -1075,6 +1084,22 @@ struct rq { #endif unsigned int push_busy; struct cpu_stop_work push_work; + +#ifdef CONFIG_SCHED_CORE + /* per rq */ + struct rq *core; + struct task_struct *core_pick; + unsigned int core_enabled; + unsigned int core_sched_seq; + struct rb_root core_tree; + + /* shared state */ + unsigned int core_task_seq; + unsigned int core_pick_seq; + unsigned long core_cookie; + unsigned char core_forceidle; + unsigned int core_forceidle_seq; +#endif }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -1113,6 +1138,206 @@ static inline bool is_migration_disabled(struct task_struct *p) #endif } +struct sched_group; +#ifdef CONFIG_SCHED_CORE +static inline struct cpumask *sched_group_span(struct sched_group *sg); + +DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); + +static inline bool sched_core_enabled(struct rq *rq) +{ + return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; +} + +static inline bool sched_core_disabled(void) +{ + return !static_branch_unlikely(&__sched_core_enabled); +} + +/* + * Be careful with this function; not for general use. The return value isn't + * stable unless you actually hold a relevant rq->__lock. + */ +static inline raw_spinlock_t *rq_lockp(struct rq *rq) +{ + if (sched_core_enabled(rq)) + return &rq->core->__lock; + + return &rq->__lock; +} + +static inline raw_spinlock_t *__rq_lockp(struct rq *rq) +{ + if (rq->core_enabled) + return &rq->core->__lock; + + return &rq->__lock; +} + +bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi); + +/* + * Helpers to check if the CPU's core cookie matches with the task's cookie + * when core scheduling is enabled. + * A special case is that the task's cookie always matches with CPU's core + * cookie if the CPU is in an idle core. + */ +static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) +{ + /* Ignore cookie match if core scheduler is not enabled on the CPU. */ + if (!sched_core_enabled(rq)) + return true; + + return rq->core->core_cookie == p->core_cookie; +} + +static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) +{ + bool idle_core = true; + int cpu; + + /* Ignore cookie match if core scheduler is not enabled on the CPU. */ + if (!sched_core_enabled(rq)) + return true; + + for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { + if (!available_idle_cpu(cpu)) { + idle_core = false; + break; + } + } + + /* + * A CPU in an idle core is always the best choice for tasks with + * cookies. + */ + return idle_core || rq->core->core_cookie == p->core_cookie; +} + +static inline bool sched_group_cookie_match(struct rq *rq, + struct task_struct *p, + struct sched_group *group) +{ + int cpu; + + /* Ignore cookie match if core scheduler is not enabled on the CPU. 
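[The cookie these helpers compare against rq->core->core_cookie is assigned from userspace through the PR_SCHED_CORE prctl() handled by sched_core_share_pid() earlier in this diff. A hedged userspace sketch is shown below; the fallback constants mirror the uapi values but should be treated as an assumption if <linux/prctl.h> predates core scheduling. pid 0 targets the calling task and the fourth argument (0) selects the single-task scope.]

/* Hedged sketch: tag the current task with a fresh core-scheduling
 * cookie and read back its opaque (hashed) identifier.  Children
 * created afterwards inherit the cookie via sched_core_fork(). */
#include <stdio.h>
#include <stdint.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
# define PR_SCHED_CORE         62
# define PR_SCHED_CORE_GET      0
# define PR_SCHED_CORE_CREATE   1
#endif

int main(void)
{
        uint64_t cookie_id = 0;         /* 8-byte aligned, as GET requires */

        if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
                perror("PR_SCHED_CORE_CREATE");

        if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0, 0,
                  (unsigned long)&cookie_id))
                perror("PR_SCHED_CORE_GET");

        printf("core cookie id: %#llx\n", (unsigned long long)cookie_id);
        return 0;
}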
*/ + if (!sched_core_enabled(rq)) + return true; + + for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { + if (sched_core_cookie_match(rq, p)) + return true; + } + return false; +} + +extern void queue_core_balance(struct rq *rq); + +static inline bool sched_core_enqueued(struct task_struct *p) +{ + return !RB_EMPTY_NODE(&p->core_node); +} + +extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); +extern void sched_core_dequeue(struct rq *rq, struct task_struct *p); + +extern void sched_core_get(void); +extern void sched_core_put(void); + +extern unsigned long sched_core_alloc_cookie(void); +extern void sched_core_put_cookie(unsigned long cookie); +extern unsigned long sched_core_get_cookie(unsigned long cookie); +extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie); + +#else /* !CONFIG_SCHED_CORE */ + +static inline bool sched_core_enabled(struct rq *rq) +{ + return false; +} + +static inline bool sched_core_disabled(void) +{ + return true; +} + +static inline raw_spinlock_t *rq_lockp(struct rq *rq) +{ + return &rq->__lock; +} + +static inline raw_spinlock_t *__rq_lockp(struct rq *rq) +{ + return &rq->__lock; +} + +static inline void queue_core_balance(struct rq *rq) +{ +} + +static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) +{ + return true; +} + +static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) +{ + return true; +} + +static inline bool sched_group_cookie_match(struct rq *rq, + struct task_struct *p, + struct sched_group *group) +{ + return true; +} +#endif /* CONFIG_SCHED_CORE */ + +static inline void lockdep_assert_rq_held(struct rq *rq) +{ + lockdep_assert_held(__rq_lockp(rq)); +} + +extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); +extern bool raw_spin_rq_trylock(struct rq *rq); +extern void raw_spin_rq_unlock(struct rq *rq); + +static inline void raw_spin_rq_lock(struct rq *rq) +{ + raw_spin_rq_lock_nested(rq, 0); +} + +static inline void raw_spin_rq_lock_irq(struct rq *rq) +{ + local_irq_disable(); + raw_spin_rq_lock(rq); +} + +static inline void raw_spin_rq_unlock_irq(struct rq *rq) +{ + raw_spin_rq_unlock(rq); + local_irq_enable(); +} + +static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) +{ + unsigned long flags; + local_irq_save(flags); + raw_spin_rq_lock(rq); + return flags; +} + +static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) +{ + raw_spin_rq_unlock(rq); + local_irq_restore(flags); +} + +#define raw_spin_rq_lock_irqsave(rq, flags) \ +do { \ + flags = _raw_spin_rq_lock_irqsave(rq); \ +} while (0) + #ifdef CONFIG_SCHED_SMT extern void __update_idle_core(struct rq *rq); @@ -1134,6 +1359,57 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define raw_rq() raw_cpu_ptr(&runqueues) +#ifdef CONFIG_FAIR_GROUP_SCHED +static inline struct task_struct *task_of(struct sched_entity *se) +{ + SCHED_WARN_ON(!entity_is_task(se)); + return container_of(se, struct task_struct, se); +} + +static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) +{ + return p->se.cfs_rq; +} + +/* runqueue on which this entity is (to be) queued */ +static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) +{ + return se->cfs_rq; +} + +/* runqueue "owned" by this group */ +static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) +{ + return grp->my_q; +} + +#else + +static inline struct task_struct *task_of(struct sched_entity *se) +{ + return 
container_of(se, struct task_struct, se); +} + +static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) +{ + return &task_rq(p)->cfs; +} + +static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) +{ + struct task_struct *p = task_of(se); + struct rq *rq = task_rq(p); + + return &rq->cfs; +} + +/* runqueue "owned" by this group */ +static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) +{ + return NULL; +} +#endif + extern void update_rq_clock(struct rq *rq); static inline u64 __rq_clock_broken(struct rq *rq) @@ -1179,7 +1455,7 @@ static inline void assert_clock_updated(struct rq *rq) static inline u64 rq_clock(struct rq *rq) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); assert_clock_updated(rq); return rq->clock; @@ -1187,7 +1463,7 @@ static inline u64 rq_clock(struct rq *rq) static inline u64 rq_clock_task(struct rq *rq) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); assert_clock_updated(rq); return rq->clock_task; @@ -1213,7 +1489,7 @@ static inline u64 rq_clock_thermal(struct rq *rq) static inline void rq_clock_skip_update(struct rq *rq) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); rq->clock_update_flags |= RQCF_REQ_SKIP; } @@ -1223,7 +1499,7 @@ static inline void rq_clock_skip_update(struct rq *rq) */ static inline void rq_clock_cancel_skipupdate(struct rq *rq) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); rq->clock_update_flags &= ~RQCF_REQ_SKIP; } @@ -1254,7 +1530,7 @@ extern struct callback_head balance_push_callback; */ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) { - rf->cookie = lockdep_pin_lock(&rq->lock); + rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); #ifdef CONFIG_SCHED_DEBUG rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); @@ -1272,12 +1548,12 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) rf->clock_update_flags = RQCF_UPDATED; #endif - lockdep_unpin_lock(&rq->lock, rf->cookie); + lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); } static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) { - lockdep_repin_lock(&rq->lock, rf->cookie); + lockdep_repin_lock(__rq_lockp(rq), rf->cookie); #ifdef CONFIG_SCHED_DEBUG /* @@ -1298,7 +1574,7 @@ static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) { rq_unpin_lock(rq, rf); - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); } static inline void @@ -1307,7 +1583,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) __releases(p->pi_lock) { rq_unpin_lock(rq, rf); - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); } @@ -1315,7 +1591,7 @@ static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) { - raw_spin_lock_irqsave(&rq->lock, rf->flags); + raw_spin_rq_lock_irqsave(rq, rf->flags); rq_pin_lock(rq, rf); } @@ -1323,7 +1599,7 @@ static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) { - raw_spin_lock_irq(&rq->lock); + raw_spin_rq_lock_irq(rq); rq_pin_lock(rq, rf); } @@ -1331,7 +1607,7 @@ static inline void rq_lock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) { - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); rq_pin_lock(rq, rf); } @@ -1339,7 +1615,7 @@ static inline void rq_relock(struct rq *rq, struct rq_flags *rf) __acquires(rq->lock) { - raw_spin_lock(&rq->lock); + raw_spin_rq_lock(rq); rq_repin_lock(rq, rf); } @@ -1348,7 +1624,7 @@ rq_unlock_irqrestore(struct rq 
*rq, struct rq_flags *rf) __releases(rq->lock) { rq_unpin_lock(rq, rf); - raw_spin_unlock_irqrestore(&rq->lock, rf->flags); + raw_spin_rq_unlock_irqrestore(rq, rf->flags); } static inline void @@ -1356,7 +1632,7 @@ rq_unlock_irq(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) { rq_unpin_lock(rq, rf); - raw_spin_unlock_irq(&rq->lock); + raw_spin_rq_unlock_irq(rq); } static inline void @@ -1364,7 +1640,7 @@ rq_unlock(struct rq *rq, struct rq_flags *rf) __releases(rq->lock) { rq_unpin_lock(rq, rf); - raw_spin_unlock(&rq->lock); + raw_spin_rq_unlock(rq); } static inline struct rq * @@ -1429,7 +1705,7 @@ queue_balance_callback(struct rq *rq, struct callback_head *head, void (*func)(struct rq *rq)) { - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) return; @@ -1844,6 +2120,9 @@ struct sched_class { #ifdef CONFIG_SMP int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); + + struct task_struct * (*pick_task)(struct rq *rq); + void (*migrate_task_rq)(struct task_struct *p, int new_cpu); void (*task_woken)(struct rq *this_rq, struct task_struct *task); @@ -1893,7 +2172,6 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev) static inline void set_next_task(struct rq *rq, struct task_struct *next) { - WARN_ON_ONCE(rq->curr != next); next->sched_class->set_next_task(rq, next, false); } @@ -1969,7 +2247,7 @@ static inline struct task_struct *get_push_task(struct rq *rq) { struct task_struct *p = rq->curr; - lockdep_assert_held(&rq->lock); + lockdep_assert_rq_held(rq); if (rq->push_busy) return NULL; @@ -2181,10 +2459,38 @@ unsigned long arch_scale_freq_capacity(int cpu) } #endif + #ifdef CONFIG_SMP -#ifdef CONFIG_PREEMPTION -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); +static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) +{ +#ifdef CONFIG_SCHED_CORE + /* + * In order to not have {0,2},{1,3} turn into into an AB-BA, + * order by core-id first and cpu-id second. + * + * Notably: + * + * double_rq_lock(0,3); will take core-0, core-1 lock + * double_rq_lock(1,2); will take core-1, core-0 lock + * + * when only cpu-id is considered. + */ + if (rq1->core->cpu < rq2->core->cpu) + return true; + if (rq1->core->cpu > rq2->core->cpu) + return false; + + /* + * __sched_core_flip() relies on SMT having cpu-id lock order. 
+ */ +#endif + return rq1->cpu < rq2->cpu; +} + +extern void double_rq_lock(struct rq *rq1, struct rq *rq2); + +#ifdef CONFIG_PREEMPTION /* * fair double_lock_balance: Safely acquires both rq->locks in a fair @@ -2199,7 +2505,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __acquires(busiest->lock) __acquires(this_rq->lock) { - raw_spin_unlock(&this_rq->lock); + raw_spin_rq_unlock(this_rq); double_rq_lock(this_rq, busiest); return 1; @@ -2218,20 +2524,21 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __acquires(busiest->lock) __acquires(this_rq->lock) { - int ret = 0; - - if (unlikely(!raw_spin_trylock(&busiest->lock))) { - if (busiest < this_rq) { - raw_spin_unlock(&this_rq->lock); - raw_spin_lock(&busiest->lock); - raw_spin_lock_nested(&this_rq->lock, - SINGLE_DEPTH_NESTING); - ret = 1; - } else - raw_spin_lock_nested(&busiest->lock, - SINGLE_DEPTH_NESTING); + if (__rq_lockp(this_rq) == __rq_lockp(busiest)) + return 0; + + if (likely(raw_spin_rq_trylock(busiest))) + return 0; + + if (rq_order_less(this_rq, busiest)) { + raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); + return 0; } - return ret; + + raw_spin_rq_unlock(this_rq); + double_rq_lock(this_rq, busiest); + + return 1; } #endif /* CONFIG_PREEMPTION */ @@ -2241,11 +2548,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) */ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) { - if (unlikely(!irqs_disabled())) { - /* printk() doesn't work well under rq->lock */ - raw_spin_unlock(&this_rq->lock); - BUG_ON(1); - } + lockdep_assert_irqs_disabled(); return _double_lock_balance(this_rq, busiest); } @@ -2253,8 +2556,9 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock) { - raw_spin_unlock(&busiest->lock); - lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); + if (__rq_lockp(this_rq) != __rq_lockp(busiest)) + raw_spin_rq_unlock(busiest); + lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); } static inline void double_lock(spinlock_t *l1, spinlock_t *l2) @@ -2285,31 +2589,6 @@ static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) } /* - * double_rq_lock - safely lock two runqueues - * - * Note this does not disable interrupts like task_rq_lock, - * you need to do so manually before calling. 
- */ -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) - __acquires(rq1->lock) - __acquires(rq2->lock) -{ - BUG_ON(!irqs_disabled()); - if (rq1 == rq2) { - raw_spin_lock(&rq1->lock); - __acquire(rq2->lock); /* Fake it out ;) */ - } else { - if (rq1 < rq2) { - raw_spin_lock(&rq1->lock); - raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); - } else { - raw_spin_lock(&rq2->lock); - raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); - } - } -} - -/* * double_rq_unlock - safely unlock two runqueues * * Note this does not restore interrupts like task_rq_unlock, @@ -2319,11 +2598,11 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) { - raw_spin_unlock(&rq1->lock); - if (rq1 != rq2) - raw_spin_unlock(&rq2->lock); + if (__rq_lockp(rq1) != __rq_lockp(rq2)) + raw_spin_rq_unlock(rq2); else __release(rq2->lock); + raw_spin_rq_unlock(rq1); } extern void set_rq_online (struct rq *rq); @@ -2344,7 +2623,7 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) { BUG_ON(!irqs_disabled()); BUG_ON(rq1 != rq2); - raw_spin_lock(&rq1->lock); + raw_spin_rq_lock(rq1); __acquire(rq2->lock); /* Fake it out ;) */ } @@ -2359,7 +2638,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq2->lock) { BUG_ON(rq1 != rq2); - raw_spin_unlock(&rq1->lock); + raw_spin_rq_unlock(rq1); __release(rq2->lock); } diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index dc218e9f4558..d8f8eb0c655b 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -25,7 +25,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) } static inline void -rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) +rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { if (rq) rq->rq_sched_info.run_delay += delta; @@ -42,7 +42,7 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) #else /* !CONFIG_SCHEDSTATS: */ static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { } -static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { } +static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { } static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { } # define schedstat_enabled() 0 # define __schedstat_inc(var) do { } while (0) @@ -150,29 +150,24 @@ static inline void psi_sched_switch(struct task_struct *prev, #endif /* CONFIG_PSI */ #ifdef CONFIG_SCHED_INFO -static inline void sched_info_reset_dequeued(struct task_struct *t) -{ - t->sched_info.last_queued = 0; -} - /* * We are interested in knowing how long it was from the *first* time a * task was queued to the time that it finally hit a CPU, we call this routine * from dequeue_task() to account for possible rq->clock skew across CPUs. The * delta taken on each CPU would annul the skew. 
*/ -static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) +static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t) { - unsigned long long now = rq_clock(rq), delta = 0; + unsigned long long delta = 0; - if (sched_info_on()) { - if (t->sched_info.last_queued) - delta = now - t->sched_info.last_queued; - } - sched_info_reset_dequeued(t); + if (!t->sched_info.last_queued) + return; + + delta = rq_clock(rq) - t->sched_info.last_queued; + t->sched_info.last_queued = 0; t->sched_info.run_delay += delta; - rq_sched_info_dequeued(rq, delta); + rq_sched_info_dequeue(rq, delta); } /* @@ -182,11 +177,14 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) */ static void sched_info_arrive(struct rq *rq, struct task_struct *t) { - unsigned long long now = rq_clock(rq), delta = 0; + unsigned long long now, delta = 0; + + if (!t->sched_info.last_queued) + return; - if (t->sched_info.last_queued) - delta = now - t->sched_info.last_queued; - sched_info_reset_dequeued(t); + now = rq_clock(rq); + delta = now - t->sched_info.last_queued; + t->sched_info.last_queued = 0; t->sched_info.run_delay += delta; t->sched_info.last_arrival = now; t->sched_info.pcount++; @@ -197,14 +195,12 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) /* * This function is only called from enqueue_task(), but also only updates * the timestamp if it is already not set. It's assumed that - * sched_info_dequeued() will clear that stamp when appropriate. + * sched_info_dequeue() will clear that stamp when appropriate. */ -static inline void sched_info_queued(struct rq *rq, struct task_struct *t) +static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t) { - if (sched_info_on()) { - if (!t->sched_info.last_queued) - t->sched_info.last_queued = rq_clock(rq); - } + if (!t->sched_info.last_queued) + t->sched_info.last_queued = rq_clock(rq); } /* @@ -212,7 +208,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t) * due, typically, to expiring its time slice (this may also be called when * switching to the idle task). Now we can calculate how long we ran. * Also, if the process is still in the TASK_RUNNING state, call - * sched_info_queued() to mark that it has now again started waiting on + * sched_info_enqueue() to mark that it has now again started waiting on * the runqueue. */ static inline void sched_info_depart(struct rq *rq, struct task_struct *t) @@ -221,8 +217,8 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t) rq_sched_info_depart(rq, delta); - if (t->state == TASK_RUNNING) - sched_info_queued(rq, t); + if (task_is_running(t)) + sched_info_enqueue(rq, t); } /* @@ -231,7 +227,7 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t) * the idle task.) We are only called when prev != next. */ static inline void -__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) +sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { /* * prev now departs the CPU. 
It's not interesting to record @@ -245,18 +241,8 @@ __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct sched_info_arrive(rq, next); } -static inline void -sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) -{ - if (sched_info_on()) - __sched_info_switch(rq, prev, next); -} - #else /* !CONFIG_SCHED_INFO: */ -# define sched_info_queued(rq, t) do { } while (0) -# define sched_info_reset_dequeued(t) do { } while (0) -# define sched_info_dequeued(rq, t) do { } while (0) -# define sched_info_depart(rq, t) do { } while (0) -# define sched_info_arrive(rq, next) do { } while (0) +# define sched_info_enqueue(rq, t) do { } while (0) +# define sched_info_dequeue(rq, t) do { } while (0) # define sched_info_switch(rq, t, next) do { } while (0) #endif /* CONFIG_SCHED_INFO */ diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 55f39125c0e1..f988ebe3febb 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -34,15 +34,24 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool fir stop->se.exec_start = rq_clock_task(rq); } -static struct task_struct *pick_next_task_stop(struct rq *rq) +static struct task_struct *pick_task_stop(struct rq *rq) { if (!sched_stop_runnable(rq)) return NULL; - set_next_task_stop(rq, rq->stop, true); return rq->stop; } +static struct task_struct *pick_next_task_stop(struct rq *rq) +{ + struct task_struct *p = pick_task_stop(rq); + + if (p) + set_next_task_stop(rq, p, true); + + return p; +} + static void enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) { @@ -123,6 +132,7 @@ DEFINE_SCHED_CLASS(stop) = { #ifdef CONFIG_SMP .balance = balance_stop, + .pick_task = pick_task_stop, .select_task_rq = select_task_rq_stop, .set_cpus_allowed = set_cpus_allowed_common, #endif diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 55a0a243e871..b77ad49dc14f 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -467,7 +467,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) struct root_domain *old_rd = NULL; unsigned long flags; - raw_spin_lock_irqsave(&rq->lock, flags); + raw_spin_rq_lock_irqsave(rq, flags); if (rq->rd) { old_rd = rq->rd; @@ -493,7 +493,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) set_rq_online(rq); - raw_spin_unlock_irqrestore(&rq->lock, flags); + raw_spin_rq_unlock_irqrestore(rq, flags); if (old_rd) call_rcu(&old_rd->rcu, free_rootdomain); @@ -675,7 +675,7 @@ static void update_top_cache_domain(int cpu) sd = highest_flag_domain(cpu, SD_ASYM_PACKING); rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd); - sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY); + sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL); rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd); } @@ -1267,6 +1267,116 @@ next: } /* + * Asymmetric CPU capacity bits + */ +struct asym_cap_data { + struct list_head link; + unsigned long capacity; + unsigned long cpus[]; +}; + +/* + * Set of available CPUs grouped by their corresponding capacities + * Each list entry contains a CPU mask reflecting CPUs that share the same + * capacity. + * The lifespan of data is unlimited. + */ +static LIST_HEAD(asym_cap_list); + +#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus) + +/* + * Verify whether there is any CPU capacity asymmetry in a given sched domain. + * Provides sd_flags reflecting the asymmetry scope. 
+ */ +static inline int +asym_cpu_capacity_classify(const struct cpumask *sd_span, + const struct cpumask *cpu_map) +{ + struct asym_cap_data *entry; + int count = 0, miss = 0; + + /* + * Count how many unique CPU capacities this domain spans across + * (compare sched_domain CPUs mask with ones representing available + * CPUs capacities). Take into account CPUs that might be offline: + * skip those. + */ + list_for_each_entry(entry, &asym_cap_list, link) { + if (cpumask_intersects(sd_span, cpu_capacity_span(entry))) + ++count; + else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) + ++miss; + } + + WARN_ON_ONCE(!count && !list_empty(&asym_cap_list)); + + /* No asymmetry detected */ + if (count < 2) + return 0; + /* Some of the available CPU capacity values have not been detected */ + if (miss) + return SD_ASYM_CPUCAPACITY; + + /* Full asymmetry */ + return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL; + +} + +static inline void asym_cpu_capacity_update_data(int cpu) +{ + unsigned long capacity = arch_scale_cpu_capacity(cpu); + struct asym_cap_data *entry = NULL; + + list_for_each_entry(entry, &asym_cap_list, link) { + if (capacity == entry->capacity) + goto done; + } + + entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL); + if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n")) + return; + entry->capacity = capacity; + list_add(&entry->link, &asym_cap_list); +done: + __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); +} + +/* + * Build-up/update list of CPUs grouped by their capacities + * An update requires explicit request to rebuild sched domains + * with state indicating CPU topology changes. + */ +static void asym_cpu_capacity_scan(void) +{ + struct asym_cap_data *entry, *next; + int cpu; + + list_for_each_entry(entry, &asym_cap_list, link) + cpumask_clear(cpu_capacity_span(entry)); + + for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_FLAG_DOMAIN)) + asym_cpu_capacity_update_data(cpu); + + list_for_each_entry_safe(entry, next, &asym_cap_list, link) { + if (cpumask_empty(cpu_capacity_span(entry))) { + list_del(&entry->link); + kfree(entry); + } + } + + /* + * Only one capacity value has been detected i.e. this system is symmetric. + * No need to keep this data around. 
+ */ + if (list_is_singular(&asym_cap_list)) { + entry = list_first_entry(&asym_cap_list, typeof(*entry), link); + list_del(&entry->link); + kfree(entry); + } +} + +/* * Initializers for schedule domains * Non-inlined to reduce accumulated stack pressure in build_sched_domains() */ @@ -1399,11 +1509,12 @@ int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; static struct sched_domain * sd_init(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, - struct sched_domain *child, int dflags, int cpu) + struct sched_domain *child, int cpu) { struct sd_data *sdd = &tl->data; struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); int sd_id, sd_weight, sd_flags = 0; + struct cpumask *sd_span; #ifdef CONFIG_NUMA /* @@ -1420,9 +1531,6 @@ sd_init(struct sched_domain_topology_level *tl, "wrong sd_flags in topology description\n")) sd_flags &= TOPOLOGY_SD_FLAGS; - /* Apply detected topology flags */ - sd_flags |= dflags; - *sd = (struct sched_domain){ .min_interval = sd_weight, .max_interval = 2*sd_weight, @@ -1454,13 +1562,19 @@ sd_init(struct sched_domain_topology_level *tl, #endif }; - cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); - sd_id = cpumask_first(sched_domain_span(sd)); + sd_span = sched_domain_span(sd); + cpumask_and(sd_span, cpu_map, tl->mask(cpu)); + sd_id = cpumask_first(sd_span); + + sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); + + WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == + (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY), + "CPU capacity asymmetry not supported on SMT\n"); /* * Convert topological properties into behaviour. */ - /* Don't attempt to spread across CPUs of different capacities. */ if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) sd->child->flags &= ~SD_PREFER_SIBLING; @@ -1926,9 +2040,9 @@ static void __sdt_free(const struct cpumask *cpu_map) static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, - struct sched_domain *child, int dflags, int cpu) + struct sched_domain *child, int cpu) { - struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); + struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); if (child) { sd->level = child->level + 1; @@ -1991,65 +2105,6 @@ static bool topology_span_sane(struct sched_domain_topology_level *tl, } /* - * Find the sched_domain_topology_level where all CPU capacities are visible - * for all CPUs. - */ -static struct sched_domain_topology_level -*asym_cpu_capacity_level(const struct cpumask *cpu_map) -{ - int i, j, asym_level = 0; - bool asym = false; - struct sched_domain_topology_level *tl, *asym_tl = NULL; - unsigned long cap; - - /* Is there any asymmetry? */ - cap = arch_scale_cpu_capacity(cpumask_first(cpu_map)); - - for_each_cpu(i, cpu_map) { - if (arch_scale_cpu_capacity(i) != cap) { - asym = true; - break; - } - } - - if (!asym) - return NULL; - - /* - * Examine topology from all CPU's point of views to detect the lowest - * sched_domain_topology_level where a highest capacity CPU is visible - * to everyone. 
- */ - for_each_cpu(i, cpu_map) { - unsigned long max_capacity = arch_scale_cpu_capacity(i); - int tl_id = 0; - - for_each_sd_topology(tl) { - if (tl_id < asym_level) - goto next_level; - - for_each_cpu_and(j, tl->mask(i), cpu_map) { - unsigned long capacity; - - capacity = arch_scale_cpu_capacity(j); - - if (capacity <= max_capacity) - continue; - - max_capacity = capacity; - asym_level = tl_id; - asym_tl = tl; - } -next_level: - tl_id++; - } - } - - return asym_tl; -} - - -/* * Build sched domains for a given set of CPUs and attach the sched domains * to the individual CPUs */ @@ -2061,7 +2116,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct s_data d; struct rq *rq = NULL; int i, ret = -ENOMEM; - struct sched_domain_topology_level *tl_asym; bool has_asym = false; if (WARN_ON(cpumask_empty(cpu_map))) @@ -2071,24 +2125,19 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att if (alloc_state != sa_rootdomain) goto error; - tl_asym = asym_cpu_capacity_level(cpu_map); - /* Set up domains for CPUs specified by the cpu_map: */ for_each_cpu(i, cpu_map) { struct sched_domain_topology_level *tl; - int dflags = 0; sd = NULL; for_each_sd_topology(tl) { - if (tl == tl_asym) { - dflags |= SD_ASYM_CPUCAPACITY; - has_asym = true; - } if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) goto error; - sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); + sd = build_sched_domain(tl, cpu_map, attr, sd, i); + + has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; if (tl == sched_domain_topology) *per_cpu_ptr(d.sd, i) = sd; @@ -2217,6 +2266,7 @@ int sched_init_domains(const struct cpumask *cpu_map) zalloc_cpumask_var(&fallback_doms, GFP_KERNEL); arch_update_cpu_topology(); + asym_cpu_capacity_scan(); ndoms_cur = 1; doms_cur = alloc_sched_domains(ndoms_cur); if (!doms_cur) @@ -2299,6 +2349,9 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], /* Let the architecture update CPU core mappings: */ new_topology = arch_update_cpu_topology(); + /* Trigger rebuilding CPU capacity asymmetry data */ + if (new_topology) + asym_cpu_capacity_scan(); if (!doms_new) { WARN_ON_ONCE(dattr_new); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 183cc6ae68a6..76577d1642a5 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -264,17 +264,22 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent } EXPORT_SYMBOL(prepare_to_wait); -void +/* Returns true if we are the first waiter in the queue, false otherwise. */ +bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state) { unsigned long flags; + bool was_empty = false; wq_entry->flags |= WQ_FLAG_EXCLUSIVE; spin_lock_irqsave(&wq_head->lock, flags); - if (list_empty(&wq_entry->entry)) + if (list_empty(&wq_entry->entry)) { + was_empty = list_empty(&wq_head->head); __add_wait_queue_entry_tail(wq_head, wq_entry); + } set_current_state(state); spin_unlock_irqrestore(&wq_head->lock, flags); + return was_empty; } EXPORT_SYMBOL(prepare_to_wait_exclusive); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 9f58049ac16d..057e17f3215d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -107,6 +107,7 @@ struct seccomp_knotif { * installing process should allocate the fd as normal. * @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC * is allowed. + * @ioctl_flags: The flags used for the seccomp_addfd ioctl. * @ret: The return value of the installing process. 
It is set to the fd num * upon success (>= 0). * @completion: Indicates that the installing process has completed fd @@ -118,6 +119,7 @@ struct seccomp_kaddfd { struct file *file; int fd; unsigned int flags; + __u32 ioctl_flags; union { bool setfd; @@ -1065,18 +1067,37 @@ static u64 seccomp_next_notify_id(struct seccomp_filter *filter) return filter->notif->next_id++; } -static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd) +static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd, struct seccomp_knotif *n) { + int fd; + /* * Remove the notification, and reset the list pointers, indicating * that it has been handled. */ list_del_init(&addfd->list); if (!addfd->setfd) - addfd->ret = receive_fd(addfd->file, addfd->flags); + fd = receive_fd(addfd->file, addfd->flags); else - addfd->ret = receive_fd_replace(addfd->fd, addfd->file, - addfd->flags); + fd = receive_fd_replace(addfd->fd, addfd->file, addfd->flags); + addfd->ret = fd; + + if (addfd->ioctl_flags & SECCOMP_ADDFD_FLAG_SEND) { + /* If we fail reset and return an error to the notifier */ + if (fd < 0) { + n->state = SECCOMP_NOTIFY_SENT; + } else { + /* Return the FD we just added */ + n->flags = 0; + n->error = 0; + n->val = fd; + } + } + + /* + * Mark the notification as completed. From this point, addfd mem + * might be invalidated and we can't safely read it anymore. + */ complete(&addfd->completion); } @@ -1120,7 +1141,7 @@ static int seccomp_do_user_notification(int this_syscall, struct seccomp_kaddfd, list); /* Check if we were woken up by a addfd message */ if (addfd) - seccomp_handle_addfd(addfd); + seccomp_handle_addfd(addfd, &n); } while (n.state != SECCOMP_NOTIFY_REPLIED); @@ -1581,7 +1602,7 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter, if (addfd.newfd_flags & ~O_CLOEXEC) return -EINVAL; - if (addfd.flags & ~SECCOMP_ADDFD_FLAG_SETFD) + if (addfd.flags & ~(SECCOMP_ADDFD_FLAG_SETFD | SECCOMP_ADDFD_FLAG_SEND)) return -EINVAL; if (addfd.newfd && !(addfd.flags & SECCOMP_ADDFD_FLAG_SETFD)) @@ -1591,6 +1612,7 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter, if (!kaddfd.file) return -EBADF; + kaddfd.ioctl_flags = addfd.flags; kaddfd.flags = addfd.newfd_flags; kaddfd.setfd = addfd.flags & SECCOMP_ADDFD_FLAG_SETFD; kaddfd.fd = addfd.newfd; @@ -1616,6 +1638,23 @@ static long seccomp_notify_addfd(struct seccomp_filter *filter, goto out_unlock; } + if (addfd.flags & SECCOMP_ADDFD_FLAG_SEND) { + /* + * Disallow queuing an atomic addfd + send reply while there are + * some addfd requests still to process. + * + * There is no clear reason to support it and allows us to keep + * the loop on the other side straight-forward. + */ + if (!list_empty(&knotif->addfd)) { + ret = -EBUSY; + goto out_unlock; + } + + /* Allow exactly only one reply */ + knotif->state = SECCOMP_NOTIFY_REPLIED; + } + list_add(&kaddfd.list, &knotif->addfd); complete(&knotif->ready); mutex_unlock(&filter->notify_lock); diff --git a/kernel/signal.c b/kernel/signal.c index f7c6ffcbd044..f6371dfa1f89 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -412,8 +412,8 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, int override_rlimit, const unsigned int sigqueue_flags) { struct sigqueue *q = NULL; - struct user_struct *user; - int sigpending; + struct ucounts *ucounts = NULL; + long sigpending; /* * Protect access to @t credentials. This can go away when all @@ -424,77 +424,38 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, * changes from/to zero. 
*/ rcu_read_lock(); - user = __task_cred(t)->user; - sigpending = atomic_inc_return(&user->sigpending); + ucounts = task_ucounts(t); + sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1); if (sigpending == 1) - get_uid(user); + ucounts = get_ucounts(ucounts); rcu_read_unlock(); - if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { - /* - * Preallocation does not hold sighand::siglock so it can't - * use the cache. The lockless caching requires that only - * one consumer and only one producer run at a time. - */ - q = READ_ONCE(t->sigqueue_cache); - if (!q || sigqueue_flags) - q = kmem_cache_alloc(sigqueue_cachep, gfp_flags); - else - WRITE_ONCE(t->sigqueue_cache, NULL); + if (override_rlimit || (sigpending < LONG_MAX && sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { + q = kmem_cache_alloc(sigqueue_cachep, gfp_flags); } else { print_dropped_signal(sig); } if (unlikely(q == NULL)) { - if (atomic_dec_and_test(&user->sigpending)) - free_uid(user); + if (ucounts && dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) + put_ucounts(ucounts); } else { INIT_LIST_HEAD(&q->list); q->flags = sigqueue_flags; - q->user = user; + q->ucounts = ucounts; } - return q; } -void exit_task_sigqueue_cache(struct task_struct *tsk) -{ - /* Race free because @tsk is mopped up */ - struct sigqueue *q = tsk->sigqueue_cache; - - if (q) { - tsk->sigqueue_cache = NULL; - /* - * Hand it back to the cache as the task might - * be self reaping which would leak the object. - */ - kmem_cache_free(sigqueue_cachep, q); - } -} - -static void sigqueue_cache_or_free(struct sigqueue *q) -{ - /* - * Cache one sigqueue per task. This pairs with the consumer side - * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the - * compiler from store tearing and to tell KCSAN that the data race - * is intentional when run without holding current->sighand->siglock, - * which is fine as current obviously cannot run __sigqueue_free() - * concurrently. 
- */ - if (!READ_ONCE(current->sigqueue_cache)) - WRITE_ONCE(current->sigqueue_cache, q); - else - kmem_cache_free(sigqueue_cachep, q); -} - static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) return; - if (atomic_dec_and_test(&q->user->sigpending)) - free_uid(q->user); - sigqueue_cache_or_free(q); + if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) { + put_ucounts(q->ucounts); + q->ucounts = NULL; + } + kmem_cache_free(sigqueue_cachep, q); } void flush_sigqueue(struct sigpending *queue) @@ -2869,6 +2830,8 @@ static void signal_delivered(struct ksignal *ksig, int stepping) if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) sigaddset(&blocked, ksig->sig); set_current_blocked(&blocked); + if (current->sas_ss_flags & SS_AUTODISARM) + sas_ss_reset(current); tracehook_signal_handler(stepping); } @@ -4187,11 +4150,7 @@ int __save_altstack(stack_t __user *uss, unsigned long sp) int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | __put_user(t->sas_ss_flags, &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); - if (err) - return err; - if (t->sas_ss_flags & SS_AUTODISARM) - sas_ss_reset(t); - return 0; + return err; } #ifdef CONFIG_COMPAT @@ -4246,11 +4205,7 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) &uss->ss_sp) | __put_user(t->sas_ss_flags, &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); - if (err) - return err; - if (t->sas_ss_flags & SS_AUTODISARM) - sas_ss_reset(t); - return 0; + return err; } #endif @@ -4719,7 +4674,7 @@ void kdb_send_sig(struct task_struct *t, int sig) } new_t = kdb_prev_t != t; kdb_prev_t = t; - if (t->state != TASK_RUNNING && new_t) { + if (!task_is_running(t) && new_t) { spin_unlock(&t->sighand->siglock); kdb_printf("Process is not RUNNING, sending a signal from " "kdb risks deadlock\n" diff --git a/kernel/smpboot.c b/kernel/smpboot.c index f25208e8df83..e4163042c4d6 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -33,7 +33,6 @@ struct task_struct *idle_thread_get(unsigned int cpu) if (!tsk) return ERR_PTR(-ENOMEM); - init_idle(tsk, cpu); return tsk; } diff --git a/kernel/softirq.c b/kernel/softirq.c index 4992853ef53d..f3a012179f47 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -76,7 +76,7 @@ static void wakeup_softirqd(void) /* Interrupts are disabled: no need to stop preemption */ struct task_struct *tsk = __this_cpu_read(ksoftirqd); - if (tsk && tsk->state != TASK_RUNNING) + if (tsk) wake_up_process(tsk); } @@ -92,8 +92,7 @@ static bool ksoftirqd_running(unsigned long pending) if (pending & SOFTIRQ_NOW_MASK) return false; - return tsk && (tsk->state == TASK_RUNNING) && - !__kthread_should_park(tsk); + return tsk && task_is_running(tsk) && !__kthread_should_park(tsk); } #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/kernel/sys.c b/kernel/sys.c index 3a583a29815f..ef1a78f5d71c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -479,7 +479,7 @@ static int set_user(struct cred *new) * for programs doing set*uid()+execve() by harmlessly deferring the * failure to the execve() stage. 
*/ - if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) && + if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) && new_user != INIT_USER) current->flags |= PF_NPROC_EXCEEDED; else @@ -558,6 +558,10 @@ long __sys_setreuid(uid_t ruid, uid_t euid) if (retval < 0) goto error; + retval = set_cred_ucounts(new); + if (retval < 0) + goto error; + return commit_creds(new); error: @@ -616,6 +620,10 @@ long __sys_setuid(uid_t uid) if (retval < 0) goto error; + retval = set_cred_ucounts(new); + if (retval < 0) + goto error; + return commit_creds(new); error: @@ -691,6 +699,10 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) if (retval < 0) goto error; + retval = set_cred_ucounts(new); + if (retval < 0) + goto error; + return commit_creds(new); error: @@ -2550,6 +2562,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, error = set_syscall_user_dispatch(arg2, arg3, arg4, (char __user *) arg5); break; +#ifdef CONFIG_SCHED_CORE + case PR_SCHED_CORE: + error = sched_core_share_pid(arg2, arg3, arg4, arg5); + break; +#endif default: error = -EINVAL; break; diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 0ea8128468c3..dad4d994641e 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -99,7 +99,7 @@ COND_SYSCALL(flock); /* fs/quota.c */ COND_SYSCALL(quotactl); -COND_SYSCALL(quotactl_path); +COND_SYSCALL(quotactl_fd); /* fs/readdir.c */ diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c index ccb78509f1a8..664ded05dd7a 100644 --- a/kernel/sysctl-test.c +++ b/kernel/sysctl-test.c @@ -49,7 +49,7 @@ static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, KUNIT_PROC_READ, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); /* * See above. @@ -58,7 +58,7 @@ static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, KUNIT_PROC_WRITE, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); } /* @@ -95,7 +95,7 @@ static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table, KUNIT_PROC_READ, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); /* * See previous comment. 
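For orientation, not part of the patch: the new PR_SCHED_CORE case wired into sys_prctl() in kernel/sys.c above is the userspace entry point for core scheduling. Below is a minimal sketch of a task requesting its own cookie; the numeric values of PR_SCHED_CORE and PR_SCHED_CORE_CREATE come from the uapi side of this series and are restated here only as assumptions, in case the installed headers predate them.

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SCHED_CORE
	#define PR_SCHED_CORE		62	/* assumed uapi value from this series */
	#define PR_SCHED_CORE_CREATE	1	/* allocate a fresh cookie */
	#endif

	int main(void)
	{
		/*
		 * pid 0 means "the calling task"; the fourth argument is the
		 * scope (0 = just this thread in this series' encoding) and
		 * the fifth must be 0.  All four arguments are forwarded to
		 * sched_core_share_pid() as shown in the kernel/sys.c hunk
		 * above.
		 */
		if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, 0, 0))
			perror("prctl(PR_SCHED_CORE)");
		return 0;
	}

After this call the task carries a non-zero core cookie, and task selection on the SMT siblings is then constrained by the core_cookie matching helpers (sched_cpu_cookie_match() and friends) added to kernel/sched/sched.h earlier in this diff.
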
@@ -104,7 +104,7 @@ static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table, KUNIT_PROC_WRITE, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); } /* @@ -135,11 +135,11 @@ static void sysctl_test_api_dointvec_table_len_is_zero(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); } /* @@ -174,7 +174,7 @@ static void sysctl_test_api_dointvec_table_read_but_position_set( KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer, &len, &pos)); - KUNIT_EXPECT_EQ(test, (size_t)0, len); + KUNIT_EXPECT_EQ(test, 0, len); } /* @@ -203,7 +203,7 @@ static void sysctl_test_dointvec_read_happy_single_positive(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, user_buffer, &len, &pos)); - KUNIT_ASSERT_EQ(test, (size_t)3, len); + KUNIT_ASSERT_EQ(test, 3, len); buffer[len] = '\0'; /* And we read 13 back out. */ KUNIT_EXPECT_STREQ(test, "13\n", buffer); @@ -233,9 +233,9 @@ static void sysctl_test_dointvec_read_happy_single_negative(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, user_buffer, &len, &pos)); - KUNIT_ASSERT_EQ(test, (size_t)4, len); + KUNIT_ASSERT_EQ(test, 4, len); buffer[len] = '\0'; - KUNIT_EXPECT_STREQ(test, "-16\n", (char *)buffer); + KUNIT_EXPECT_STREQ(test, "-16\n", buffer); } /* @@ -265,7 +265,7 @@ static void sysctl_test_dointvec_write_happy_single_positive(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, user_buffer, &len, &pos)); KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len); - KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, pos); KUNIT_EXPECT_EQ(test, 9, *((int *)table.data)); } @@ -295,7 +295,7 @@ static void sysctl_test_dointvec_write_happy_single_negative(struct kunit *test) KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, user_buffer, &len, &pos)); KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len); - KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, pos); KUNIT_EXPECT_EQ(test, -9, *((int *)table.data)); } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d4a78e08f6d8..272f4a272f8c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -27,6 +27,7 @@ #include <linux/sysctl.h> #include <linux/bitmap.h> #include <linux/signal.h> +#include <linux/panic.h> #include <linux/printk.h> #include <linux/proc_fs.h> #include <linux/security.h> @@ -71,6 +72,7 @@ #include <linux/coredump.h> #include <linux/latencytop.h> #include <linux/pid.h> +#include <linux/delayacct.h> #include "../lib/kstrtox.h" @@ -1494,7 +1496,6 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int err = 0; - bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; unsigned long *bitmap = *(unsigned long **) table->data; @@ -1579,12 +1580,12 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, } bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1); - first = 0; proc_skip_char(&p, &left, '\n'); } left += skipped; } else { unsigned long bit_a, bit_b = 0; + bool first = 1; while (left) { bit_a = 
find_next_bit(bitmap, bitmap_len, bit_b); @@ -1747,6 +1748,17 @@ static struct ctl_table kern_table[] = { .extra2 = SYSCTL_ONE, }, #endif /* CONFIG_SCHEDSTATS */ +#ifdef CONFIG_TASK_DELAY_ACCT + { + .procname = "task_delayacct", + .data = NULL, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sysctl_delayacct, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif /* CONFIG_TASK_DELAY_ACCT */ #ifdef CONFIG_NUMA_BALANCING { .procname = "numa_balancing", @@ -2909,11 +2921,11 @@ static struct ctl_table vm_table[] = { .extra2 = &one_thousand, }, { - .procname = "percpu_pagelist_fraction", - .data = &percpu_pagelist_fraction, - .maxlen = sizeof(percpu_pagelist_fraction), + .procname = "percpu_pagelist_high_fraction", + .data = &percpu_pagelist_high_fraction, + .maxlen = sizeof(percpu_pagelist_high_fraction), .mode = 0644, - .proc_handler = percpu_pagelist_fraction_sysctl_handler, + .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, .extra1 = SYSCTL_ZERO, }, { @@ -2951,14 +2963,6 @@ static struct ctl_table vm_table[] = { .proc_handler = proc_dointvec_jiffies, }, { - .procname = "block_dump", - .data = &block_dump, - .maxlen = sizeof(block_dump), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - }, - { .procname = "vfs_cache_pressure", .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 83e158d016ba..04bfd62f5e5c 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -64,6 +64,15 @@ config LEGACY_TIMER_TICK lack support for the generic clockevent framework. New platforms should use generic clockevents instead. +config TIME_KUNIT_TEST + tristate "KUnit test for kernel/time functions" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test RTC library functions. + + If unsure, say N. + if GENERIC_CLOCKEVENTS menu "Timers subsystem" @@ -117,13 +126,14 @@ config NO_HZ_FULL the task mostly runs in userspace and has few kernel activity. You need to fill up the nohz_full boot parameter with the - desired range of dynticks CPUs. + desired range of dynticks CPUs to use it. This is implemented at + the expense of some overhead in user <-> kernel transitions: + syscalls, exceptions and interrupts. - This is implemented at the expense of some overhead in user <-> kernel - transitions: syscalls, exceptions and interrupts. Even when it's - dynamically off. + By default, without passing the nohz_full parameter, this behaves just + like NO_HZ_IDLE. - Say N. + If you're a distro say Y. 
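To make the reworded NO_HZ_FULL help above concrete (illustrative values only, not part of the patch): on an 8-CPU machine, booting with

	nohz_full=1-7

lets CPUs 1-7 stop the scheduling tick while they run a single task, at the cost of the extra user <-> kernel transition overhead described above, and leaves CPU 0 as the housekeeping/timekeeping CPU. Without any nohz_full= parameter the kernel now behaves just like NO_HZ_IDLE, which is what the new default recommendation in the help text relies on.
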
endchoice diff --git a/kernel/time/Makefile b/kernel/time/Makefile index 1fb1c1ef6a19..7e875e63ff3b 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -21,3 +21,5 @@ obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o obj-$(CONFIG_TEST_UDELAY) += test_udelay.o obj-$(CONFIG_TIME_NS) += namespace.o +obj-$(CONFIG_TEST_CLOCKSOURCE_WATCHDOG) += clocksource-wdtest.o +obj-$(CONFIG_TIME_KUNIT_TEST) += time_test.o diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index f5490222e134..003ccf338d20 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -347,8 +347,7 @@ static void clockevents_notify_released(void) while (!list_empty(&clockevents_released)) { dev = list_entry(clockevents_released.next, struct clock_event_device, list); - list_del(&dev->list); - list_add(&dev->list, &clockevent_devices); + list_move(&dev->list, &clockevent_devices); tick_check_new_device(dev); } } @@ -576,8 +575,7 @@ void clockevents_exchange_device(struct clock_event_device *old, if (old) { module_put(old->owner); clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED); - list_del(&old->list); - list_add(&old->list, &clockevents_released); + list_move(&old->list, &clockevents_released); } if (new) { @@ -629,6 +627,7 @@ void tick_offline_cpu(unsigned int cpu) /** * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu + * @cpu: The dead CPU */ void tick_cleanup_dead_cpu(int cpu) { @@ -668,9 +667,9 @@ static struct bus_type clockevents_subsys = { static DEFINE_PER_CPU(struct device, tick_percpu_dev); static struct tick_device *tick_get_tick_dev(struct device *dev); -static ssize_t sysfs_show_current_tick_dev(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t current_device_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct tick_device *td; ssize_t count = 0; @@ -682,12 +681,12 @@ static ssize_t sysfs_show_current_tick_dev(struct device *dev, raw_spin_unlock_irq(&clockevents_lock); return count; } -static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL); +static DEVICE_ATTR_RO(current_device); /* We don't support the abomination of removable broadcast devices */ -static ssize_t sysfs_unbind_tick_dev(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t unbind_device_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { char name[CS_NAME_LEN]; ssize_t ret = sysfs_get_uname(buf, name, count); @@ -714,7 +713,7 @@ static ssize_t sysfs_unbind_tick_dev(struct device *dev, mutex_unlock(&clockevents_mutex); return ret ? ret : count; } -static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev); +static DEVICE_ATTR_WO(unbind_device); #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static struct device tick_bc_dev = { diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c new file mode 100644 index 000000000000..01df12395c0e --- /dev/null +++ b/kernel/time/clocksource-wdtest.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Unit test for the clocksource watchdog. + * + * Copyright (C) 2021 Facebook, Inc. + * + * Author: Paul E. 
McKenney <paulmck@kernel.org> + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/clocksource.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ +#include <linux/tick.h> +#include <linux/kthread.h> +#include <linux/delay.h> +#include <linux/prandom.h> +#include <linux/cpu.h> + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>"); + +static int holdoff = IS_BUILTIN(CONFIG_TEST_CLOCKSOURCE_WATCHDOG) ? 10 : 0; +module_param(holdoff, int, 0444); +MODULE_PARM_DESC(holdoff, "Time to wait to start test (s)."); + +/* Watchdog kthread's task_struct pointer for debug purposes. */ +static struct task_struct *wdtest_task; + +static u64 wdtest_jiffies_read(struct clocksource *cs) +{ + return (u64)jiffies; +} + +/* Assume HZ > 100. */ +#define JIFFIES_SHIFT 8 + +static struct clocksource clocksource_wdtest_jiffies = { + .name = "wdtest-jiffies", + .rating = 1, /* lowest valid rating*/ + .uncertainty_margin = TICK_NSEC, + .read = wdtest_jiffies_read, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_MUST_VERIFY, + .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */ + .shift = JIFFIES_SHIFT, + .max_cycles = 10, +}; + +static int wdtest_ktime_read_ndelays; +static bool wdtest_ktime_read_fuzz; + +static u64 wdtest_ktime_read(struct clocksource *cs) +{ + int wkrn = READ_ONCE(wdtest_ktime_read_ndelays); + static int sign = 1; + u64 ret; + + if (wkrn) { + udelay(cs->uncertainty_margin / 250); + WRITE_ONCE(wdtest_ktime_read_ndelays, wkrn - 1); + } + ret = ktime_get_real_fast_ns(); + if (READ_ONCE(wdtest_ktime_read_fuzz)) { + sign = -sign; + ret = ret + sign * 100 * NSEC_PER_MSEC; + } + return ret; +} + +static void wdtest_ktime_cs_mark_unstable(struct clocksource *cs) +{ + pr_info("--- Marking %s unstable due to clocksource watchdog.\n", cs->name); +} + +#define KTIME_FLAGS (CLOCK_SOURCE_IS_CONTINUOUS | \ + CLOCK_SOURCE_VALID_FOR_HRES | \ + CLOCK_SOURCE_MUST_VERIFY | \ + CLOCK_SOURCE_VERIFY_PERCPU) + +static struct clocksource clocksource_wdtest_ktime = { + .name = "wdtest-ktime", + .rating = 300, + .read = wdtest_ktime_read, + .mask = CLOCKSOURCE_MASK(64), + .flags = KTIME_FLAGS, + .mark_unstable = wdtest_ktime_cs_mark_unstable, + .list = LIST_HEAD_INIT(clocksource_wdtest_ktime.list), +}; + +/* Reset the clocksource if needed. */ +static void wdtest_ktime_clocksource_reset(void) +{ + if (clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE) { + clocksource_unregister(&clocksource_wdtest_ktime); + clocksource_wdtest_ktime.flags = KTIME_FLAGS; + schedule_timeout_uninterruptible(HZ / 10); + clocksource_register_khz(&clocksource_wdtest_ktime, 1000 * 1000); + } +} + +/* Run the specified series of watchdog tests. */ +static int wdtest_func(void *arg) +{ + unsigned long j1, j2; + char *s; + int i; + + schedule_timeout_uninterruptible(holdoff * HZ); + + /* + * Verify that jiffies-like clocksources get the manually + * specified uncertainty margin. 
+ */ + pr_info("--- Verify jiffies-like uncertainty margin.\n"); + __clocksource_register(&clocksource_wdtest_jiffies); + WARN_ON_ONCE(clocksource_wdtest_jiffies.uncertainty_margin != TICK_NSEC); + + j1 = clocksource_wdtest_jiffies.read(&clocksource_wdtest_jiffies); + schedule_timeout_uninterruptible(HZ); + j2 = clocksource_wdtest_jiffies.read(&clocksource_wdtest_jiffies); + WARN_ON_ONCE(j1 == j2); + + clocksource_unregister(&clocksource_wdtest_jiffies); + + /* + * Verify that tsc-like clocksources are assigned a reasonable + * uncertainty margin. + */ + pr_info("--- Verify tsc-like uncertainty margin.\n"); + clocksource_register_khz(&clocksource_wdtest_ktime, 1000 * 1000); + WARN_ON_ONCE(clocksource_wdtest_ktime.uncertainty_margin < NSEC_PER_USEC); + + j1 = clocksource_wdtest_ktime.read(&clocksource_wdtest_ktime); + udelay(1); + j2 = clocksource_wdtest_ktime.read(&clocksource_wdtest_ktime); + pr_info("--- tsc-like times: %lu - %lu = %lu.\n", j2, j1, j2 - j1); + WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC)); + + /* Verify tsc-like stability with various numbers of errors injected. */ + for (i = 0; i <= max_cswd_read_retries + 1; i++) { + if (i <= 1 && i < max_cswd_read_retries) + s = ""; + else if (i <= max_cswd_read_retries) + s = ", expect message"; + else + s = ", expect clock skew"; + pr_info("--- Watchdog with %dx error injection, %lu retries%s.\n", i, max_cswd_read_retries, s); + WRITE_ONCE(wdtest_ktime_read_ndelays, i); + schedule_timeout_uninterruptible(2 * HZ); + WARN_ON_ONCE(READ_ONCE(wdtest_ktime_read_ndelays)); + WARN_ON_ONCE((i <= max_cswd_read_retries) != + !(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE)); + wdtest_ktime_clocksource_reset(); + } + + /* Verify tsc-like stability with clock-value-fuzz error injection. */ + pr_info("--- Watchdog clock-value-fuzz error injection, expect clock skew and per-CPU mismatches.\n"); + WRITE_ONCE(wdtest_ktime_read_fuzz, true); + schedule_timeout_uninterruptible(2 * HZ); + WARN_ON_ONCE(!(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE)); + clocksource_verify_percpu(&clocksource_wdtest_ktime); + WRITE_ONCE(wdtest_ktime_read_fuzz, false); + + clocksource_unregister(&clocksource_wdtest_ktime); + + pr_info("--- Done with test.\n"); + return 0; +} + +static void wdtest_print_module_parms(void) +{ + pr_alert("--- holdoff=%d\n", holdoff); +} + +/* Cleanup function. */ +static void clocksource_wdtest_cleanup(void) +{ +} + +static int __init clocksource_wdtest_init(void) +{ + int ret = 0; + + wdtest_print_module_parms(); + + /* Create watchdog-test task. */ + wdtest_task = kthread_run(wdtest_func, NULL, "wdtest"); + if (IS_ERR(wdtest_task)) { + ret = PTR_ERR(wdtest_task); + pr_warn("%s: Failed to create wdtest kthread.\n", __func__); + wdtest_task = NULL; + return ret; + } + + return 0; +} + +module_init(clocksource_wdtest_init); +module_exit(clocksource_wdtest_cleanup); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 2cd902592fc1..b89c76e1c02c 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -14,6 +14,8 @@ #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ #include <linux/tick.h> #include <linux/kthread.h> +#include <linux/prandom.h> +#include <linux/cpu.h> #include "tick-internal.h" #include "timekeeping_internal.h" @@ -93,6 +95,20 @@ static char override_name[CS_NAME_LEN]; static int finished_booting; static u64 suspend_start; +/* + * Threshold: 0.0312s, when doubled: 0.0625s. + * Also a default for cs->uncertainty_margin when registering clocks. 
+ */ +#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5) + +/* + * Maximum permissible delay between two readouts of the watchdog + * clocksource surrounding a read of the clocksource being validated. + * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as + * a lower bound for cs->uncertainty_margin values when registering clocks. + */ +#define WATCHDOG_MAX_SKEW (50 * NSEC_PER_USEC) + #ifdef CONFIG_CLOCKSOURCE_WATCHDOG static void clocksource_watchdog_work(struct work_struct *work); static void clocksource_select(void); @@ -119,10 +135,9 @@ static int clocksource_watchdog_kthread(void *data); static void __clocksource_change_rating(struct clocksource *cs, int rating); /* - * Interval: 0.5sec Threshold: 0.0625s + * Interval: 0.5sec. */ #define WATCHDOG_INTERVAL (HZ >> 1) -#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) static void clocksource_watchdog_work(struct work_struct *work) { @@ -184,12 +199,164 @@ void clocksource_mark_unstable(struct clocksource *cs) spin_unlock_irqrestore(&watchdog_lock, flags); } +ulong max_cswd_read_retries = 3; +module_param(max_cswd_read_retries, ulong, 0644); +EXPORT_SYMBOL_GPL(max_cswd_read_retries); +static int verify_n_cpus = 8; +module_param(verify_n_cpus, int, 0644); + +static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) +{ + unsigned int nretries; + u64 wd_end, wd_delta; + int64_t wd_delay; + + for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) { + local_irq_disable(); + *wdnow = watchdog->read(watchdog); + *csnow = cs->read(cs); + wd_end = watchdog->read(watchdog); + local_irq_enable(); + + wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask); + wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, + watchdog->shift); + if (wd_delay <= WATCHDOG_MAX_SKEW) { + if (nretries > 1 || nretries >= max_cswd_read_retries) { + pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", + smp_processor_id(), watchdog->name, nretries); + } + return true; + } + } + + pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n", + smp_processor_id(), watchdog->name, wd_delay, nretries); + return false; +} + +static u64 csnow_mid; +static cpumask_t cpus_ahead; +static cpumask_t cpus_behind; +static cpumask_t cpus_chosen; + +static void clocksource_verify_choose_cpus(void) +{ + int cpu, i, n = verify_n_cpus; + + if (n < 0) { + /* Check all of the CPUs. */ + cpumask_copy(&cpus_chosen, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &cpus_chosen); + return; + } + + /* If no checking desired, or no other CPU to check, leave. */ + cpumask_clear(&cpus_chosen); + if (n == 0 || num_online_cpus() <= 1) + return; + + /* Make sure to select at least one CPU other than the current CPU. */ + cpu = cpumask_next(-1, cpu_online_mask); + if (cpu == smp_processor_id()) + cpu = cpumask_next(cpu, cpu_online_mask); + if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) + return; + cpumask_set_cpu(cpu, &cpus_chosen); + + /* Force a sane value for the boot parameter. */ + if (n > nr_cpu_ids) + n = nr_cpu_ids; + + /* + * Randomly select the specified number of CPUs. If the same + * CPU is selected multiple times, that CPU is checked only once, + * and no replacement CPU is selected. This gracefully handles + * situations where verify_n_cpus is greater than the number of + * CPUs that are currently online. 
+ */ + for (i = 1; i < n; i++) { + cpu = prandom_u32() % nr_cpu_ids; + cpu = cpumask_next(cpu - 1, cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_next(-1, cpu_online_mask); + if (!WARN_ON_ONCE(cpu >= nr_cpu_ids)) + cpumask_set_cpu(cpu, &cpus_chosen); + } + + /* Don't verify ourselves. */ + cpumask_clear_cpu(smp_processor_id(), &cpus_chosen); +} + +static void clocksource_verify_one_cpu(void *csin) +{ + struct clocksource *cs = (struct clocksource *)csin; + + csnow_mid = cs->read(cs); +} + +void clocksource_verify_percpu(struct clocksource *cs) +{ + int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX; + u64 csnow_begin, csnow_end; + int cpu, testcpu; + s64 delta; + + if (verify_n_cpus == 0) + return; + cpumask_clear(&cpus_ahead); + cpumask_clear(&cpus_behind); + get_online_cpus(); + preempt_disable(); + clocksource_verify_choose_cpus(); + if (cpumask_weight(&cpus_chosen) == 0) { + preempt_enable(); + put_online_cpus(); + pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name); + return; + } + testcpu = smp_processor_id(); + pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen)); + for_each_cpu(cpu, &cpus_chosen) { + if (cpu == testcpu) + continue; + csnow_begin = cs->read(cs); + smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1); + csnow_end = cs->read(cs); + delta = (s64)((csnow_mid - csnow_begin) & cs->mask); + if (delta < 0) + cpumask_set_cpu(cpu, &cpus_behind); + delta = (csnow_end - csnow_mid) & cs->mask; + if (delta < 0) + cpumask_set_cpu(cpu, &cpus_ahead); + delta = clocksource_delta(csnow_end, csnow_begin, cs->mask); + cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift); + if (cs_nsec > cs_nsec_max) + cs_nsec_max = cs_nsec; + if (cs_nsec < cs_nsec_min) + cs_nsec_min = cs_nsec; + } + preempt_enable(); + put_online_cpus(); + if (!cpumask_empty(&cpus_ahead)) + pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n", + cpumask_pr_args(&cpus_ahead), testcpu, cs->name); + if (!cpumask_empty(&cpus_behind)) + pr_warn(" CPUs %*pbl behind CPU %d for clocksource %s.\n", + cpumask_pr_args(&cpus_behind), testcpu, cs->name); + if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind)) + pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n", + testcpu, cs_nsec_min, cs_nsec_max, cs->name); +} +EXPORT_SYMBOL_GPL(clocksource_verify_percpu); + static void clocksource_watchdog(struct timer_list *unused) { - struct clocksource *cs; u64 csnow, wdnow, cslast, wdlast, delta; - int64_t wd_nsec, cs_nsec; int next_cpu, reset_pending; + int64_t wd_nsec, cs_nsec; + struct clocksource *cs; + u32 md; spin_lock(&watchdog_lock); if (!watchdog_running) @@ -206,10 +373,11 @@ static void clocksource_watchdog(struct timer_list *unused) continue; } - local_irq_disable(); - csnow = cs->read(cs); - wdnow = watchdog->read(watchdog); - local_irq_enable(); + if (!cs_watchdog_read(cs, &csnow, &wdnow)) { + /* Clock readout unreliable, so give it up. */ + __clocksource_unstable(cs); + continue; + } /* Clocksource initialized ? */ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || @@ -235,13 +403,20 @@ static void clocksource_watchdog(struct timer_list *unused) continue; /* Check the deviation from the watchdog clocksource. 
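
The cross-CPU check done by clocksource_verify_percpu() reduces to three reads: the measuring CPU samples the clocksource before and after asking a remote CPU (via IPI) to sample it, and the remote value must fall between the two local ones. A small model of that classification, assuming a full-width 64-bit counter (the kernel additionally applies cs->mask to the deltas):

#include <stdint.h>
#include <stdio.h>

/*
 * begin/end are read on the measuring CPU, mid on the remote CPU (by IPI in
 * the kernel).  With a monotonic, synchronized counter both deltas are
 * non-negative; a negative delta flags the remote CPU as behind or ahead.
 */
static void classify(uint64_t begin, uint64_t mid, uint64_t end)
{
	int64_t d_behind = (int64_t)(mid - begin); /* < 0: remote CPU behind */
	int64_t d_ahead  = (int64_t)(end - mid);   /* < 0: remote CPU ahead  */

	if (d_behind < 0)
		printf("remote CPU behind by %lld\n", (long long)-d_behind);
	if (d_ahead < 0)
		printf("remote CPU ahead by %lld\n", (long long)-d_ahead);
	if (d_behind >= 0 && d_ahead >= 0)
		printf("remote reading consistent (window %llu)\n",
		       (unsigned long long)(end - begin));
}

int main(void)
{
	classify(1000, 1010, 1020);  /* consistent            */
	classify(1000,  990, 1020);  /* remote appears behind */
	classify(1000, 1030, 1020);  /* remote appears ahead  */
	return 0;
}
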
*/ - if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { + md = cs->uncertainty_margin + watchdog->uncertainty_margin; + if (abs(cs_nsec - wd_nsec) > md) { pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n", smp_processor_id(), cs->name); - pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", - watchdog->name, wdnow, wdlast, watchdog->mask); - pr_warn(" '%s' cs_now: %llx cs_last: %llx mask: %llx\n", - cs->name, csnow, cslast, cs->mask); + pr_warn(" '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n", + watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask); + pr_warn(" '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n", + cs->name, cs_nsec, csnow, cslast, cs->mask); + if (curr_clocksource == cs) + pr_warn(" '%s' is current clocksource.\n", cs->name); + else if (curr_clocksource) + pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name); + else + pr_warn(" No current clocksource.\n"); __clocksource_unstable(cs); continue; } @@ -407,6 +582,12 @@ static int __clocksource_watchdog_kthread(void) unsigned long flags; int select = 0; + /* Do any required per-CPU skew verification. */ + if (curr_clocksource && + curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE && + curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU) + clocksource_verify_percpu(curr_clocksource); + spin_lock_irqsave(&watchdog_lock, flags); list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { if (cs->flags & CLOCK_SOURCE_UNSTABLE) { @@ -876,6 +1057,26 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, NSEC_PER_SEC / scale, sec * scale); } + + /* + * If the uncertainty margin is not specified, calculate it. + * If both scale and freq are non-zero, calculate the clock + * period, but bound below at 2*WATCHDOG_MAX_SKEW. However, + * if either of scale or freq is zero, be very conservative and + * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the + * uncertainty margin. Allow stupidly small uncertainty margins + * to be specified by the caller for testing purposes, but warn + * to discourage production use of this capability. + */ + if (scale && freq && !cs->uncertainty_margin) { + cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq); + if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW) + cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW; + } else if (!cs->uncertainty_margin) { + cs->uncertainty_margin = WATCHDOG_THRESHOLD; + } + WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW); + /* * Ensure clocksources that have large 'mult' values don't overflow * when adjusted. diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index a492e4da69ba..01935aafdb46 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -49,13 +49,14 @@ static u64 jiffies_read(struct clocksource *cs) * for "tick-less" systems. 
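
The default uncertainty margin picked by __clocksource_update_freq_scale() is simply one clock period, floored at 2 * WATCHDOG_MAX_SKEW (100us), with the coarse WATCHDOG_THRESHOLD kept as a fallback when scale or freq is unknown. A quick userspace sanity check of that arithmetic, with the constants copied from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC        1000000000ULL
#define WATCHDOG_MAX_SKEW   (50 * 1000ULL)        /* 50us     */
#define WATCHDOG_THRESHOLD  (NSEC_PER_SEC >> 5)   /* ~31.25ms */

static uint64_t default_uncertainty_margin(uint32_t scale, uint32_t freq)
{
	uint64_t margin;

	if (!scale || !freq)
		return WATCHDOG_THRESHOLD;        /* unknown rate: be conservative */

	margin = NSEC_PER_SEC / ((uint64_t)scale * freq); /* one clock period */
	if (margin < 2 * WATCHDOG_MAX_SKEW)
		margin = 2 * WATCHDOG_MAX_SKEW;   /* never below 100us */
	return margin;
}

int main(void)
{
	/* scale is 1 for the _hz helpers and 1000 for the _khz helpers. */
	printf("1 kHz    -> %llu ns\n",
	       (unsigned long long)default_uncertainty_margin(1, 1000));
	printf("32768 Hz -> %llu ns\n",
	       (unsigned long long)default_uncertainty_margin(1, 32768));
	printf("1 MHz    -> %llu ns\n",
	       (unsigned long long)default_uncertainty_margin(1000, 1000));
	printf("unknown  -> %llu ns\n",
	       (unsigned long long)default_uncertainty_margin(0, 0));
	return 0;
}
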
*/ static struct clocksource clocksource_jiffies = { - .name = "jiffies", - .rating = 1, /* lowest valid rating*/ - .read = jiffies_read, - .mask = CLOCKSOURCE_MASK(32), - .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */ - .shift = JIFFIES_SHIFT, - .max_cycles = 10, + .name = "jiffies", + .rating = 1, /* lowest valid rating*/ + .uncertainty_margin = 32 * NSEC_PER_MSEC, + .read = jiffies_read, + .mask = CLOCKSOURCE_MASK(32), + .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */ + .shift = JIFFIES_SHIFT, + .max_cycles = 10, }; __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 3bb96a8b49c9..29a5e54e6e10 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -523,7 +523,7 @@ static void arm_timer(struct k_itimer *timer, struct task_struct *p) if (CPUCLOCK_PERTHREAD(timer->it_clock)) tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER); else - tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER); + tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER); } /* @@ -1358,7 +1358,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid, if (*newval < *nextevt) *nextevt = *newval; - tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER); + tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER); } static int do_cpu_nanosleep(const clockid_t which_clock, int flags, diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index a44055228796..f7fe6fe36173 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -33,6 +33,8 @@ static int tick_broadcast_forced; static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); #ifdef CONFIG_TICK_ONESHOT +static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device); + static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); static void tick_broadcast_clear_oneshot(int cpu); static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); @@ -61,6 +63,13 @@ struct cpumask *tick_get_broadcast_mask(void) return tick_broadcast_mask; } +static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu); + +const struct clock_event_device *tick_get_wakeup_device(int cpu) +{ + return tick_get_oneshot_wakeup_device(cpu); +} + /* * Start the device in periodic mode */ @@ -88,13 +97,75 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev, return !curdev || newdev->rating > curdev->rating; } +#ifdef CONFIG_TICK_ONESHOT +static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu) +{ + return per_cpu(tick_oneshot_wakeup_device, cpu); +} + +static void tick_oneshot_wakeup_handler(struct clock_event_device *wd) +{ + /* + * If we woke up early and the tick was reprogrammed in the + * meantime then this may be spurious but harmless. 
+ */ + tick_receive_broadcast(); +} + +static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev, + int cpu) +{ + struct clock_event_device *curdev = tick_get_oneshot_wakeup_device(cpu); + + if (!newdev) + goto set_device; + + if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) || + (newdev->features & CLOCK_EVT_FEAT_C3STOP)) + return false; + + if (!(newdev->features & CLOCK_EVT_FEAT_PERCPU) || + !(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) + return false; + + if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) + return false; + + if (curdev && newdev->rating <= curdev->rating) + return false; + + if (!try_module_get(newdev->owner)) + return false; + + newdev->event_handler = tick_oneshot_wakeup_handler; +set_device: + clockevents_exchange_device(curdev, newdev); + per_cpu(tick_oneshot_wakeup_device, cpu) = newdev; + return true; +} +#else +static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu) +{ + return NULL; +} + +static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev, + int cpu) +{ + return false; +} +#endif + /* * Conditionally install/replace broadcast device */ -void tick_install_broadcast_device(struct clock_event_device *dev) +void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) { struct clock_event_device *cur = tick_broadcast_device.evtdev; + if (tick_set_oneshot_wakeup_device(dev, cpu)) + return; + if (!tick_check_broadcast_device(cur, dev)) return; @@ -253,7 +324,6 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) return ret; } -#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST int tick_receive_broadcast(void) { struct tick_device *td = this_cpu_ptr(&tick_cpu_device); @@ -268,7 +338,6 @@ int tick_receive_broadcast(void) evt->event_handler(evt); return 0; } -#endif /* * Broadcast the event to the cpus, which are set in the mask (mangled). @@ -719,24 +788,16 @@ static void broadcast_shutdown_local(struct clock_event_device *bc, clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); } -int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) +static int ___tick_broadcast_oneshot_control(enum tick_broadcast_state state, + struct tick_device *td, + int cpu) { - struct clock_event_device *bc, *dev; - int cpu, ret = 0; + struct clock_event_device *bc, *dev = td->evtdev; + int ret = 0; ktime_t now; - /* - * If there is no broadcast device, tell the caller not to go - * into deep idle. 
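
tick_set_oneshot_wakeup_device() only accepts a device as the per-CPU oneshot wakeup device when it passes a handful of feature checks and beats the currently installed one. A compact model of that predicate follows; the flag bits and struct layout are illustrative placeholders that merely mirror the CLOCK_EVT_FEAT_* tests above.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits mirroring CLOCK_EVT_FEAT_*, not the kernel values. */
#define FEAT_ONESHOT  0x01
#define FEAT_PERCPU   0x02
#define FEAT_C3STOP   0x04
#define FEAT_DUMMY    0x08

struct evtdev {
	const char *name;
	unsigned int features;
	int rating;
	int bound_cpu;  /* stands in for the cpumask_equal(cpumask_of(cpu)) test */
};

static bool suitable_wakeup_device(const struct evtdev *newdev,
				   const struct evtdev *curdev, int cpu)
{
	if (newdev->features & (FEAT_DUMMY | FEAT_C3STOP))
		return false;                 /* must keep ticking in deep idle   */
	if (!(newdev->features & FEAT_PERCPU) || !(newdev->features & FEAT_ONESHOT))
		return false;                 /* must be per-CPU and oneshot      */
	if (newdev->bound_cpu != cpu)
		return false;                 /* must be affine to this CPU only  */
	if (curdev && newdev->rating <= curdev->rating)
		return false;                 /* must improve on what we have     */
	return true;
}

int main(void)
{
	struct evtdev timer = { "soc-timer", FEAT_PERCPU | FEAT_ONESHOT, 300, 0 };
	struct evtdev arch  = { "arch-timer",
				FEAT_PERCPU | FEAT_ONESHOT | FEAT_C3STOP, 400, 0 };

	printf("soc-timer usable as wakeup device: %d\n",
	       suitable_wakeup_device(&timer, NULL, 0));
	printf("arch-timer usable as wakeup device: %d\n",
	       suitable_wakeup_device(&arch, &timer, 0));  /* no: stops in C3 */
	return 0;
}
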
- */ - if (!tick_broadcast_device.evtdev) - return -EBUSY; - - dev = this_cpu_ptr(&tick_cpu_device)->evtdev; - raw_spin_lock(&tick_broadcast_lock); bc = tick_broadcast_device.evtdev; - cpu = smp_processor_id(); if (state == TICK_BROADCAST_ENTER) { /* @@ -865,6 +926,53 @@ out: return ret; } +static int tick_oneshot_wakeup_control(enum tick_broadcast_state state, + struct tick_device *td, + int cpu) +{ + struct clock_event_device *dev, *wd; + + dev = td->evtdev; + if (td->mode != TICKDEV_MODE_ONESHOT) + return -EINVAL; + + wd = tick_get_oneshot_wakeup_device(cpu); + if (!wd) + return -ENODEV; + + switch (state) { + case TICK_BROADCAST_ENTER: + clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED); + clockevents_switch_state(wd, CLOCK_EVT_STATE_ONESHOT); + clockevents_program_event(wd, dev->next_event, 1); + break; + case TICK_BROADCAST_EXIT: + /* We may have transitioned to oneshot mode while idle */ + if (clockevent_get_state(wd) != CLOCK_EVT_STATE_ONESHOT) + return -ENODEV; + } + + return 0; +} + +int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) +{ + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); + int cpu = smp_processor_id(); + + if (!tick_oneshot_wakeup_control(state, td, cpu)) + return 0; + + if (tick_broadcast_device.evtdev) + return ___tick_broadcast_oneshot_control(state, td, cpu); + + /* + * If there is no broadcast or wakeup device, tell the caller not + * to go into deep idle. + */ + return -EBUSY; +} + /* * Reset the one shot broadcast for a cpu * @@ -991,6 +1099,9 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu) */ static void tick_broadcast_oneshot_offline(unsigned int cpu) { + if (tick_get_oneshot_wakeup_device(cpu)) + tick_set_oneshot_wakeup_device(NULL, cpu); + /* * Clear the broadcast masks for the dead cpu, but do not stop * the broadcast device! diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index e15bc0ef1912..d663249652ef 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -373,7 +373,7 @@ out_bc: /* * Can the new device be used as a broadcast device ? 
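
With a wakeup device available, __tick_broadcast_oneshot_control() now tries the cheap per-CPU path first and only falls back to the broadcast machinery, returning -EBUSY when neither exists. A rough, illustrative sketch of that dispatch order with stubbed-out backends:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

enum tick_state { ENTER, EXIT };

static bool have_wakeup_device = true;
static bool have_broadcast_device = false;

/* Stand-in for tick_oneshot_wakeup_control(): 0 means it handled the request. */
static int oneshot_wakeup_control(enum tick_state state)
{
	if (!have_wakeup_device)
		return -ENODEV;
	if (state == ENTER)
		puts("stop local device, program per-CPU wakeup timer for next_event");
	else
		puts("leave deep idle, local device resumes");
	return 0;
}

/* Stand-in for the pre-existing broadcast-mask based path. */
static int broadcast_oneshot_control(enum tick_state state)
{
	puts(state == ENTER ? "enter broadcast mask" : "leave broadcast mask");
	return 0;
}

static int tick_oneshot_control(enum tick_state state)
{
	if (!oneshot_wakeup_control(state))
		return 0;               /* per-CPU wakeup device handled it      */
	if (have_broadcast_device)
		return broadcast_oneshot_control(state);
	return -EBUSY;                  /* no help available: don't go deep idle */
}

int main(void)
{
	printf("enter deep idle: %d\n", tick_oneshot_control(ENTER));
	printf("exit deep idle:  %d\n", tick_oneshot_control(EXIT));
	return 0;
}
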
*/ - tick_install_broadcast_device(newdev); + tick_install_broadcast_device(newdev, cpu); } /** diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 7a981c9e87a4..6a742a29e545 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -61,7 +61,7 @@ extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); /* Broadcasting support */ # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu); -extern void tick_install_broadcast_device(struct clock_event_device *dev); +extern void tick_install_broadcast_device(struct clock_event_device *dev, int cpu); extern int tick_is_broadcast_device(struct clock_event_device *dev); extern void tick_suspend_broadcast(void); extern void tick_resume_broadcast(void); @@ -71,8 +71,9 @@ extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadc extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq); extern struct tick_device *tick_get_broadcast_device(void); extern struct cpumask *tick_get_broadcast_mask(void); +extern const struct clock_event_device *tick_get_wakeup_device(int cpu); # else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST: */ -static inline void tick_install_broadcast_device(struct clock_event_device *dev) { } +static inline void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) { } static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; } static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; } static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 828b091501ca..6bffe5af8cb1 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) #ifdef CONFIG_NO_HZ_FULL cpumask_var_t tick_nohz_full_mask; +EXPORT_SYMBOL_GPL(tick_nohz_full_mask); bool tick_nohz_full_running; EXPORT_SYMBOL_GPL(tick_nohz_full_running); static atomic_t tick_dep_mask; @@ -322,6 +323,46 @@ void tick_nohz_full_kick_cpu(int cpu) irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); } +static void tick_nohz_kick_task(struct task_struct *tsk) +{ + int cpu; + + /* + * If the task is not running, run_posix_cpu_timers() + * has nothing to elapse, IPI can then be spared. + * + * activate_task() STORE p->tick_dep_mask + * STORE p->on_rq + * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or()) + * LOCK rq->lock LOAD p->on_rq + * smp_mb__after_spin_lock() + * tick_nohz_task_switch() + * LOAD p->tick_dep_mask + */ + if (!sched_task_on_rq(tsk)) + return; + + /* + * If the task concurrently migrates to another CPU, + * we guarantee it sees the new tick dependency upon + * schedule. + * + * set_task_cpu(p, cpu); + * STORE p->cpu = @cpu + * __schedule() (switch to task 'p') + * LOCK rq->lock + * smp_mb__after_spin_lock() STORE p->tick_dep_mask + * tick_nohz_task_switch() smp_mb() (atomic_fetch_or()) + * LOAD p->tick_dep_mask LOAD p->cpu + */ + cpu = task_cpu(tsk); + + preempt_disable(); + if (cpu_online(cpu)) + tick_nohz_full_kick_cpu(cpu); + preempt_enable(); +} + /* * Kick all full dynticks CPUs in order to force these to re-evaluate * their dependency on the tick and restart it if necessary. 
@@ -404,19 +445,8 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu); */ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { - if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) { - if (tsk == current) { - preempt_disable(); - tick_nohz_full_kick(); - preempt_enable(); - } else { - /* - * Some future tick_nohz_full_kick_task() - * should optimize this. - */ - tick_nohz_full_kick_all(); - } - } + if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) + tick_nohz_kick_task(tsk); } EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task); @@ -430,9 +460,20 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task); * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse * per process timers. */ -void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit) +void tick_nohz_dep_set_signal(struct task_struct *tsk, + enum tick_dep_bits bit) { - tick_nohz_dep_set_all(&sig->tick_dep_mask, bit); + int prev; + struct signal_struct *sig = tsk->signal; + + prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); + if (!prev) { + struct task_struct *t; + + lockdep_assert_held(&tsk->sighand->siglock); + __for_each_thread(sig, t) + tick_nohz_kick_task(t); + } } void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit) @@ -447,13 +488,10 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi */ void __tick_nohz_task_switch(void) { - unsigned long flags; struct tick_sched *ts; - local_irq_save(flags); - if (!tick_nohz_full_cpu(smp_processor_id())) - goto out; + return; ts = this_cpu_ptr(&tick_cpu_sched); @@ -462,8 +500,6 @@ void __tick_nohz_task_switch(void) atomic_read(¤t->signal->tick_dep_mask)) tick_nohz_full_kick(); } -out: - local_irq_restore(flags); } /* Get the boot-time nohz CPU list from the kernel parameters. 
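
tick_nohz_dep_set_task() and tick_nohz_dep_set_signal() both rely on atomic_fetch_or() so that only the caller that sets the first dependency bit pays for kicking tasks; later setters see a non-zero previous mask and return immediately. The same first-setter-kicks pattern in portable C11 atomics, with the kicking stubbed out:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint tick_dep_mask;

/* Stand-in for kicking every thread of the signal struct (IPIs in the kernel). */
static void kick_all_threads(void)
{
	puts("kicking threads");
}

static void dep_set(unsigned int bit)
{
	unsigned int prev = atomic_fetch_or(&tick_dep_mask, 1u << bit);

	/*
	 * atomic_fetch_or() is a full barrier, which is what the kernel's
	 * ordering comments against p->on_rq and p->cpu rely on.  Only the
	 * transition from "no dependencies at all" triggers the expensive
	 * kicks; every later setter just returns.
	 */
	if (!prev)
		kick_all_threads();
}

int main(void)
{
	dep_set(2);   /* first setter: kicks   */
	dep_set(2);   /* already set: no kicks */
	return 0;
}
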
*/ @@ -921,27 +957,31 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) * Cancel the scheduled timer and restore the tick */ ts->tick_stopped = 0; - ts->idle_exittime = now; - tick_nohz_restart(ts, now); } -static void tick_nohz_full_update_tick(struct tick_sched *ts) +static void __tick_nohz_full_update_tick(struct tick_sched *ts, + ktime_t now) { #ifdef CONFIG_NO_HZ_FULL int cpu = smp_processor_id(); - if (!tick_nohz_full_cpu(cpu)) + if (can_stop_full_tick(cpu, ts)) + tick_nohz_stop_sched_tick(ts, cpu); + else if (ts->tick_stopped) + tick_nohz_restart_sched_tick(ts, now); +#endif +} + +static void tick_nohz_full_update_tick(struct tick_sched *ts) +{ + if (!tick_nohz_full_cpu(smp_processor_id())) return; if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) return; - if (can_stop_full_tick(cpu, ts)) - tick_nohz_stop_sched_tick(ts, cpu); - else if (ts->tick_stopped) - tick_nohz_restart_sched_tick(ts, ktime_get()); -#endif + __tick_nohz_full_update_tick(ts, ktime_get()); } static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) @@ -1188,11 +1228,13 @@ unsigned long tick_nohz_get_idle_calls(void) return ts->idle_calls; } -static void tick_nohz_account_idle_ticks(struct tick_sched *ts) +static void tick_nohz_account_idle_time(struct tick_sched *ts, + ktime_t now) { -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE unsigned long ticks; + ts->idle_exittime = now; + if (vtime_accounting_enabled_this_cpu()) return; /* @@ -1206,21 +1248,27 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts) */ if (ticks && ticks < LONG_MAX) account_idle_ticks(ticks); -#endif } -static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now) +void tick_nohz_idle_restart_tick(void) { - tick_nohz_restart_sched_tick(ts, now); - tick_nohz_account_idle_ticks(ts); + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); + + if (ts->tick_stopped) { + ktime_t now = ktime_get(); + tick_nohz_restart_sched_tick(ts, now); + tick_nohz_account_idle_time(ts, now); + } } -void tick_nohz_idle_restart_tick(void) +static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now) { - struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); + if (tick_nohz_full_cpu(smp_processor_id())) + __tick_nohz_full_update_tick(ts, now); + else + tick_nohz_restart_sched_tick(ts, now); - if (ts->tick_stopped) - __tick_nohz_idle_restart_tick(ts, ktime_get()); + tick_nohz_account_idle_time(ts, now); } /** @@ -1252,7 +1300,7 @@ void tick_nohz_idle_exit(void) tick_nohz_stop_idle(ts, now); if (tick_stopped) - __tick_nohz_idle_restart_tick(ts, now); + tick_nohz_idle_update_tick(ts, now); local_irq_enable(); } diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c new file mode 100644 index 000000000000..831e8e779ace --- /dev/null +++ b/kernel/time/time_test.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: LGPL-2.1+ + +#include <kunit/test.h> +#include <linux/time.h> + +/* + * Traditional implementation of leap year evaluation. + */ +static bool is_leap(long year) +{ + return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0); +} + +/* + * Gets the last day of a month. + */ +static int last_day_of_month(long year, int month) +{ + if (month == 2) + return 28 + is_leap(year); + if (month == 4 || month == 6 || month == 9 || month == 11) + return 30; + return 31; +} + +/* + * Advances a date by one day. 
+ */ +static void advance_date(long *year, int *month, int *mday, int *yday) +{ + if (*mday != last_day_of_month(*year, *month)) { + ++*mday; + ++*yday; + return; + } + + *mday = 1; + if (*month != 12) { + ++*month; + ++*yday; + return; + } + + *month = 1; + *yday = 0; + ++*year; +} + +/* + * Checks every day in a 160000 years interval centered at 1970-01-01 + * against the expected result. + */ +static void time64_to_tm_test_date_range(struct kunit *test) +{ + /* + * 80000 years = (80000 / 400) * 400 years + * = (80000 / 400) * 146097 days + * = (80000 / 400) * 146097 * 86400 seconds + */ + time64_t total_secs = ((time64_t) 80000) / 400 * 146097 * 86400; + long year = 1970 - 80000; + int month = 1; + int mdday = 1; + int yday = 0; + + struct tm result; + time64_t secs; + s64 days; + + for (secs = -total_secs; secs <= total_secs; secs += 86400) { + + time64_to_tm(secs, 0, &result); + + days = div_s64(secs, 86400); + + #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \ + year, month, mdday, yday, days + + KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); + KUNIT_ASSERT_EQ_MSG(test, month - 1, result.tm_mon, FAIL_MSG); + KUNIT_ASSERT_EQ_MSG(test, mdday, result.tm_mday, FAIL_MSG); + KUNIT_ASSERT_EQ_MSG(test, yday, result.tm_yday, FAIL_MSG); + + advance_date(&year, &month, &mdday, &yday); + } +} + +static struct kunit_case time_test_cases[] = { + KUNIT_CASE(time64_to_tm_test_date_range), + {} +}; + +static struct kunit_suite time_test_suite = { + .name = "time_test_cases", + .test_cases = time_test_cases, +}; + +kunit_test_suite(time_test_suite); +MODULE_LICENSE("GPL"); diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c index 62e3b46717a6..59b922c826e7 100644 --- a/kernel/time/timeconv.c +++ b/kernel/time/timeconv.c @@ -22,47 +22,16 @@ /* * Converts the calendar time to broken-down time representation - * Based on code from glibc-2.6 * * 2009-7-14: * Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com> + * 2021-06-02: + * Reimplemented by Cassio Neri <cassio.neri@gmail.com> */ #include <linux/time.h> #include <linux/module.h> - -/* - * Nonzero if YEAR is a leap year (every 4 years, - * except every 100th isn't, and every 400th is). - */ -static int __isleap(long year) -{ - return (year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0); -} - -/* do a mathdiv for long type */ -static long math_div(long a, long b) -{ - return a / b - (a % b < 0); -} - -/* How many leap years between y1 and y2, y1 must less or equal to y2 */ -static long leaps_between(long y1, long y2) -{ - long leaps1 = math_div(y1 - 1, 4) - math_div(y1 - 1, 100) - + math_div(y1 - 1, 400); - long leaps2 = math_div(y2 - 1, 4) - math_div(y2 - 1, 100) - + math_div(y2 - 1, 400); - return leaps2 - leaps1; -} - -/* How many days come before each month (0-12). */ -static const unsigned short __mon_yday[2][13] = { - /* Normal years. */ - {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, - /* Leap years. 
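
The helpers above encode the Gregorian leap-year rule and month lengths that the KUnit case then verifies exhaustively; a standalone spot check of the same rule against the usual century corner cases looks like this:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool is_leap(long year)
{
	return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}

static int last_day_of_month(long year, int month)
{
	if (month == 2)
		return 28 + is_leap(year);
	if (month == 4 || month == 6 || month == 9 || month == 11)
		return 30;
	return 31;
}

int main(void)
{
	assert(!is_leap(1900));               /* divisible by 100 but not 400 */
	assert(is_leap(2000));                /* divisible by 400             */
	assert(is_leap(2024) && !is_leap(2023));
	assert(last_day_of_month(2000, 2) == 29);
	assert(last_day_of_month(1900, 2) == 28);
	puts("leap-year spot checks passed");
	return 0;
}
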
*/ - {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} -}; +#include <linux/kernel.h> #define SECS_PER_HOUR (60 * 60) #define SECS_PER_DAY (SECS_PER_HOUR * 24) @@ -77,9 +46,11 @@ static const unsigned short __mon_yday[2][13] = { */ void time64_to_tm(time64_t totalsecs, int offset, struct tm *result) { - long days, rem, y; + u32 u32tmp, day_of_century, year_of_century, day_of_year, month, day; + u64 u64tmp, udays, century, year; + bool is_Jan_or_Feb, is_leap_year; + long days, rem; int remainder; - const unsigned short *ip; days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder); rem = remainder; @@ -103,27 +74,68 @@ void time64_to_tm(time64_t totalsecs, int offset, struct tm *result) if (result->tm_wday < 0) result->tm_wday += 7; - y = 1970; - - while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { - /* Guess a corrected year, assuming 365 days per year. */ - long yg = y + math_div(days, 365); - - /* Adjust DAYS and Y to match the guessed year. */ - days -= (yg - y) * 365 + leaps_between(y, yg); - y = yg; - } - - result->tm_year = y - 1900; - - result->tm_yday = days; - - ip = __mon_yday[__isleap(y)]; - for (y = 11; days < ip[y]; y--) - continue; - days -= ip[y]; - - result->tm_mon = y; - result->tm_mday = days + 1; + /* + * The following algorithm is, basically, Proposition 6.3 of Neri + * and Schneider [1]. In a few words: it works on the computational + * (fictitious) calendar where the year starts in March, month = 2 + * (*), and finishes in February, month = 13. This calendar is + * mathematically convenient because the day of the year does not + * depend on whether the year is leap or not. For instance: + * + * March 1st 0-th day of the year; + * ... + * April 1st 31-st day of the year; + * ... + * January 1st 306-th day of the year; (Important!) + * ... + * February 28th 364-th day of the year; + * February 29th 365-th day of the year (if it exists). + * + * After having worked out the date in the computational calendar + * (using just arithmetics) it's easy to convert it to the + * corresponding date in the Gregorian calendar. + * + * [1] "Euclidean Affine Functions and Applications to Calendar + * Algorithms". https://arxiv.org/abs/2102.06959 + * + * (*) The numbering of months follows tm more closely and thus, + * is slightly different from [1]. + */ + + udays = ((u64) days) + 2305843009213814918ULL; + + u64tmp = 4 * udays + 3; + century = div64_u64_rem(u64tmp, 146097, &u64tmp); + day_of_century = (u32) (u64tmp / 4); + + u32tmp = 4 * day_of_century + 3; + u64tmp = 2939745ULL * u32tmp; + year_of_century = upper_32_bits(u64tmp); + day_of_year = lower_32_bits(u64tmp) / 2939745 / 4; + + year = 100 * century + year_of_century; + is_leap_year = year_of_century ? !(year_of_century % 4) : !(century % 4); + + u32tmp = 2141 * day_of_year + 132377; + month = u32tmp >> 16; + day = ((u16) u32tmp) / 2141; + + /* + * Recall that January 1st is the 306-th day of the year in the + * computational (not Gregorian) calendar. + */ + is_Jan_or_Feb = day_of_year >= 306; + + /* Convert to the Gregorian calendar and adjust to Unix time. */ + year = year + is_Jan_or_Feb - 6313183731940000ULL; + month = is_Jan_or_Feb ? month - 12 : month; + day = day + 1; + day_of_year += is_Jan_or_Feb ? -306 : 31 + 28 + is_leap_year; + + /* Convert to tm's format. 
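
For experimenting with the March-based "computational calendar" trick outside the kernel, the same idea appears in Howard Hinnant's widely used civil_from_days() algorithm (different constants from the Neri-Schneider variant above, same 400-year structure). The sketch below cross-checks it against gmtime(); it illustrates the technique and is not the kernel's exact arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Days since 1970-01-01 -> proleptic Gregorian y/m/d (Hinnant's civil_from_days). */
static void civil_from_days(int64_t z, int64_t *y, unsigned *m, unsigned *d)
{
	z += 719468;                             /* shift epoch to 0000-03-01 */
	int64_t era = (z >= 0 ? z : z - 146096) / 146097;
	unsigned doe = (unsigned)(z - era * 146097);                  /* [0, 146096] */
	unsigned yoe = (doe - doe / 1460 + doe / 36524 - doe / 146096) / 365;
	int64_t yr  = (int64_t)yoe + era * 400;
	unsigned doy = doe - (365 * yoe + yoe / 4 - yoe / 100);       /* March-based */
	unsigned mp  = (5 * doy + 2) / 153;                           /* March == 0  */
	*d = doy - (153 * mp + 2) / 5 + 1;
	*m = mp < 10 ? mp + 3 : mp - 9;                               /* Jan == 1    */
	*y = yr + (*m <= 2);
}

int main(void)
{
	const time_t samples[] = { 0, 951782400 /* 2000-02-29 */, 2147483647 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		time_t t = samples[i];
		struct tm *g = gmtime(&t);
		int64_t y;
		unsigned m, d;

		civil_from_days(t / 86400, &y, &m, &d);
		printf("%lld -> %04lld-%02u-%02u (gmtime %04d-%02d-%02d)\n",
		       (long long)t, (long long)y, m, d,
		       g->tm_year + 1900, g->tm_mon + 1, g->tm_mday);
	}
	return 0;
}
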
*/ + result->tm_year = (long) (year - 1900); + result->tm_mon = (int) month; + result->tm_mday = (int) day; + result->tm_yday = (int) day_of_year; } EXPORT_SYMBOL(time64_to_tm); diff --git a/kernel/time/timer.c b/kernel/time/timer.c index d111adf4a0cb..3fadb58fc9d7 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1237,20 +1237,6 @@ int try_to_del_timer_sync(struct timer_list *timer) } EXPORT_SYMBOL(try_to_del_timer_sync); -bool timer_curr_running(struct timer_list *timer) -{ - int i; - - for (i = 0; i < NR_BASES; i++) { - struct timer_base *base = this_cpu_ptr(&timer_bases[i]); - - if (base->running_timer == timer) - return true; - } - - return false; -} - #ifdef CONFIG_PREEMPT_RT static __init void timer_base_init_expiry_lock(struct timer_base *base) { @@ -1879,7 +1865,7 @@ signed long __sched schedule_timeout(signed long timeout) printk(KERN_ERR "schedule_timeout: wrong timeout " "value %lx\n", timeout); dump_stack(); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); goto out; } } diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 6939140ab7c5..ed7d6ad694fb 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -228,6 +228,14 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) SEQ_printf(m, " event_handler: %ps\n", dev->event_handler); SEQ_printf(m, "\n"); SEQ_printf(m, " retries: %lu\n", dev->retries); + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + if (cpu >= 0) { + const struct clock_event_device *wd = tick_get_wakeup_device(cpu); + + SEQ_printf(m, "Wakeup Device: %s\n", wd ? wd->name : "<NULL>"); + } +#endif SEQ_printf(m, "\n"); } @@ -248,7 +256,7 @@ static void timer_list_show_tickdevices_header(struct seq_file *m) static inline void timer_list_header(struct seq_file *m, u64 now) { - SEQ_printf(m, "Timer List Version: v0.8\n"); + SEQ_printf(m, "Timer List Version: v0.9\n"); SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); SEQ_printf(m, "\n"); diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 7fa82778c3e6..d567b1717c4c 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -356,6 +356,68 @@ config HWLAT_TRACER file. Every time a latency is greater than tracing_thresh, it will be recorded into the ring buffer. +config OSNOISE_TRACER + bool "OS Noise tracer" + select GENERIC_TRACER + help + In the context of high-performance computing (HPC), the Operating + System Noise (osnoise) refers to the interference experienced by an + application due to activities inside the operating system. In the + context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread + can cause noise to the system. Moreover, hardware-related jobs can + also cause noise, for example, via SMIs. + + The osnoise tracer leverages the hwlat_detector by running a similar + loop with preemption, SoftIRQs and IRQs enabled, thus allowing all + the sources of osnoise during its execution. The osnoise tracer takes + note of the entry and exit point of any source of interferences, + increasing a per-cpu interference counter. It saves an interference + counter for each source of interference. The interference counter for + NMI, IRQs, SoftIRQs, and threads is increased anytime the tool + observes these interferences' entry events. When a noise happens + without any interference from the operating system level, the + hardware noise counter increases, pointing to a hardware-related + noise. 
In this way, osnoise can account for any source of + interference. At the end of the period, the osnoise tracer prints + the sum of all noise, the max single noise, the percentage of CPU + available for the thread, and the counters for the noise sources. + + In addition to the tracer, a set of tracepoints were added to + facilitate the identification of the osnoise source. + + The output will appear in the trace and trace_pipe files. + + To enable this tracer, echo in "osnoise" into the current_tracer + file. + +config TIMERLAT_TRACER + bool "Timerlat tracer" + select OSNOISE_TRACER + select GENERIC_TRACER + help + The timerlat tracer aims to help the preemptive kernel developers + to find sources of wakeup latencies of real-time threads. + + The tracer creates a per-cpu kernel thread with real-time priority. + The tracer thread sets a periodic timer to wakeup itself, and goes + to sleep waiting for the timer to fire. At the wakeup, the thread + then computes a wakeup latency value as the difference between + the current time and the absolute time that the timer was set + to expire. + + The tracer prints two lines at every activation. The first is the + timer latency observed at the hardirq context before the + activation of the thread. The second is the timer latency observed + by the thread, which is the same level that cyclictest reports. The + ACTIVATION ID field serves to relate the irq execution to its + respective thread execution. + + The tracer is build on top of osnoise tracer, and the osnoise: + events can be used to trace the source of interference from NMI, + IRQs and other threads. It also enables the capture of the + stacktrace at the IRQ context, which helps to identify the code + path that can cause thread delay. + config MMIOTRACE bool "Memory mapped IO tracing" depends on HAVE_MMIOTRACE_SUPPORT && PCI diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index b28d3e5013cd..b1c47ccf4f73 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o +obj-$(CONFIG_OSNOISE_TRACER) += trace_osnoise.o obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index d2d7cf6cfe83..b4916ef388ad 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = { static __always_inline int bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) { - int ret = security_locked_down(LOCKDOWN_BPF_READ); + int ret; - if (unlikely(ret < 0)) - goto fail; ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); if (unlikely(ret < 0)) - goto fail; - return ret; -fail: - memset(dst, 0, size); + memset(dst, 0, size); return ret; } @@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = { static __always_inline int bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) { - int ret = security_locked_down(LOCKDOWN_BPF_READ); - - if (unlikely(ret < 0)) - goto fail; + int ret; /* * The strncpy_from_kernel_nofault() call will likely not fill the @@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) */ ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); if 
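
As the help text notes, the new tracers are driven entirely through tracefs. A minimal userspace sketch that selects the osnoise tracer and streams its output; the paths are the standard tracefs mount point and the program assumes it runs as root on a kernel with OSNOISE_TRACER built in.

#include <stdio.h>

#define TRACEFS "/sys/kernel/tracing"

static int write_str(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), TRACEFS "/%s", file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[4096];
	FILE *pipe;

	if (write_str("current_tracer", "osnoise") || write_str("tracing_on", "1"))
		return 1;

	pipe = fopen(TRACEFS "/trace_pipe", "r");  /* blocks until data arrives */
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}
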
(unlikely(ret < 0)) - goto fail; - - return ret; -fail: - memset(dst, 0, size); + memset(dst, 0, size); return ret; } @@ -1011,20 +999,26 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_probe_read_user: return &bpf_probe_read_user_proto; case BPF_FUNC_probe_read_kernel: - return &bpf_probe_read_kernel_proto; + return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + NULL : &bpf_probe_read_kernel_proto; case BPF_FUNC_probe_read_user_str: return &bpf_probe_read_user_str_proto; case BPF_FUNC_probe_read_kernel_str: - return &bpf_probe_read_kernel_str_proto; + return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + NULL : &bpf_probe_read_kernel_str_proto; #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE case BPF_FUNC_probe_read: - return &bpf_probe_read_compat_proto; + return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + NULL : &bpf_probe_read_compat_proto; case BPF_FUNC_probe_read_str: - return &bpf_probe_read_compat_str_proto; + return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? + NULL : &bpf_probe_read_compat_str_proto; #endif #ifdef CONFIG_CGROUPS case BPF_FUNC_get_current_cgroup_id: return &bpf_get_current_cgroup_id_proto; + case BPF_FUNC_get_current_ancestor_cgroup_id: + return &bpf_get_current_ancestor_cgroup_id_proto; #endif case BPF_FUNC_send_signal: return &bpf_send_signal_proto; @@ -1848,7 +1842,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog * if (prog->aux->max_tp_access > btp->writable_size) return -EINVAL; - return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); + return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, + prog); } int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2e8a3fde7104..72ef4dccbcc4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, static void print_ip_ins(const char *fmt, const unsigned char *p) { + char ins[MCOUNT_INSN_SIZE]; int i; + if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { + printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); + return; + } + printk(KERN_CONT "%s", fmt); for (i = 0; i < MCOUNT_INSN_SIZE; i++) - printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); + printk(KERN_CONT "%s%02x", i ? 
":" : "", ins[i]); } enum ftrace_bug_type ftrace_bug_type; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2c0ee6484990..d1463eac11a3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3391,7 +3391,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, case RINGBUF_TYPE_PADDING: if (event->time_delta == 1) break; - /* fall through */ + fallthrough; case RINGBUF_TYPE_DATA: ts += event->time_delta; break; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a21ef9cd2aae..f8b80b5bab71 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -39,6 +39,7 @@ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> +#include <linux/panic_notifier.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> @@ -86,6 +87,7 @@ void __init disable_tracing_selftest(const char *reason) /* Pipe tracepoints to printk */ struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; +static bool tracepoint_printk_stop_on_boot __initdata; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ @@ -196,12 +198,12 @@ __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { - if (*str++ != '=' || !*str) { + if (*str++ != '=' || !*str || !strcmp("1", str)) { ftrace_dump_on_oops = DUMP_ALL; return 1; } - if (!strcmp("orig_cpu", str)) { + if (!strcmp("orig_cpu", str) || !strcmp("2", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } @@ -256,6 +258,13 @@ static int __init set_tracepoint_printk(char *str) } __setup("tp_printk", set_tracepoint_printk); +static int __init set_tracepoint_printk_stop(char *str) +{ + tracepoint_printk_stop_on_boot = true; + return 1; +} +__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop); + unsigned long long ns2usecs(u64 nsec) { nsec += 500; @@ -1682,8 +1691,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) unsigned long __read_mostly tracing_thresh; static const struct file_operations tracing_max_lat_fops; -#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ - defined(CONFIG_FSNOTIFY) +#ifdef LATENCY_FS_NOTIFY static struct workqueue_struct *fsnotify_wq; @@ -2184,8 +2192,15 @@ void tracing_reset_all_online_cpus(void) } } +/* + * The tgid_map array maps from pid to tgid; i.e. the value stored at index i + * is the tgid last observed corresponding to pid=i. + */ static int *tgid_map; +/* The maximum valid index into tgid_map. */ +static size_t tgid_map_max; + #define SAVED_CMDLINES_DEFAULT 128 #define NO_CMDLINE_MAP UINT_MAX static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; @@ -2198,9 +2213,6 @@ struct saved_cmdlines_buffer { }; static struct saved_cmdlines_buffer *savedcmd; -/* temporary disable recording */ -static atomic_t trace_record_taskinfo_disabled __read_mostly; - static inline char *get_saved_cmdlines(int idx) { return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; @@ -2461,24 +2473,41 @@ void trace_find_cmdline(int pid, char comm[]) preempt_enable(); } +static int *trace_find_tgid_ptr(int pid) +{ + /* + * Pairs with the smp_store_release in set_tracer_flag() to ensure that + * if we observe a non-NULL tgid_map then we also observe the correct + * tgid_map_max. 
+ */ + int *map = smp_load_acquire(&tgid_map); + + if (unlikely(!map || pid > tgid_map_max)) + return NULL; + + return &map[pid]; +} + int trace_find_tgid(int pid) { - if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT)) - return 0; + int *ptr = trace_find_tgid_ptr(pid); - return tgid_map[pid]; + return ptr ? *ptr : 0; } static int trace_save_tgid(struct task_struct *tsk) { + int *ptr; + /* treat recording of idle task as a success */ if (!tsk->pid) return 1; - if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT)) + ptr = trace_find_tgid_ptr(tsk->pid); + if (!ptr) return 0; - tgid_map[tsk->pid] = tsk->tgid; + *ptr = tsk->tgid; return 1; } @@ -2486,8 +2515,6 @@ static bool tracing_record_taskinfo_skip(int flags) { if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) return true; - if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on()) - return true; if (!__this_cpu_read(trace_taskinfo_save)) return true; return false; @@ -2734,9 +2761,45 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, if (!tr->no_filter_buffering_ref && (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && (entry = this_cpu_read(trace_buffered_event))) { - /* Try to use the per cpu buffer first */ + /* + * Filtering is on, so try to use the per cpu buffer first. + * This buffer will simulate a ring_buffer_event, + * where the type_len is zero and the array[0] will + * hold the full length. + * (see include/linux/ring-buffer.h for details on + * how the ring_buffer_event is structured). + * + * Using a temp buffer during filtering and copying it + * on a matched filter is quicker than writing directly + * into the ring buffer and then discarding it when + * it doesn't match. That is because the discard + * requires several atomic operations to get right. + * Copying on match and doing nothing on a failed match + * is still quicker than no copy on match, but having + * to discard out of the ring buffer on a failed match. + */ + int max_len = PAGE_SIZE - struct_size(entry, array, 1); + val = this_cpu_inc_return(trace_buffered_event_cnt); - if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) { + + /* + * Preemption is disabled, but interrupts and NMIs + * can still come in now. If that happens after + * the above increment, then it will have to go + * back to the old method of allocating the event + * on the ring buffer, and if the filter fails, it + * will have to call ring_buffer_discard_commit() + * to remove it. + * + * Need to also check the unlikely case that the + * length is bigger than the temp buffer size. + * If that happens, then the reserve is pretty much + * guaranteed to fail, as the ring buffer currently + * only allows events less than a page. But that may + * change in the future, so let the ring buffer reserve + * handle the failure in that case. 
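
The tgid_map comments describe a standard pointer-publication pattern: tgid_map_max is written first, the map pointer is then store-released, and a reader that load-acquires a non-NULL pointer is guaranteed to observe the matching bound. The same pattern in C11 atomics, reusing the kernel's names for clarity (a userspace model, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static int * _Atomic tgid_map;     /* published pointer              */
static size_t tgid_map_max;        /* plain store, ordered by release */

static void publish_map(size_t max)
{
	int *map = calloc(max + 1, sizeof(*map));

	if (!map)
		return;
	tgid_map_max = max;                          /* 1: fill in the bound   */
	atomic_store_explicit(&tgid_map, map,
			      memory_order_release); /* 2: publish the pointer */
}

static int *find_tgid_ptr(size_t pid)
{
	int *map = atomic_load_explicit(&tgid_map, memory_order_acquire);

	/* A non-NULL map implies the tgid_map_max store above is visible too. */
	if (!map || pid > tgid_map_max)
		return NULL;
	return &map[pid];
}

int main(void)
{
	publish_map(1 << 15);
	printf("pid 42 slot %s\n", find_tgid_ptr(42) ? "found" : "missing");
	printf("pid 1<<20 slot %s\n", find_tgid_ptr(1 << 20) ? "found" : "missing");
	return 0;
}
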
+ */ + if (val == 1 && likely(len <= max_len)) { trace_event_setup(entry, type, trace_ctx); entry->array[0] = len; return entry; @@ -3998,9 +4061,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) return ERR_PTR(-EBUSY); #endif - if (!iter->snapshot) - atomic_inc(&trace_record_taskinfo_disabled); - if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; @@ -4043,9 +4103,6 @@ static void s_stop(struct seq_file *m, void *p) return; #endif - if (!iter->snapshot) - atomic_dec(&trace_record_taskinfo_disabled); - trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } @@ -5182,6 +5239,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) { + int *map; + if ((mask == TRACE_ITER_RECORD_TGID) || (mask == TRACE_ITER_RECORD_CMD)) lockdep_assert_held(&event_mutex); @@ -5204,10 +5263,19 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) trace_event_enable_cmd_record(enabled); if (mask == TRACE_ITER_RECORD_TGID) { - if (!tgid_map) - tgid_map = kvcalloc(PID_MAX_DEFAULT + 1, - sizeof(*tgid_map), - GFP_KERNEL); + if (!tgid_map) { + tgid_map_max = pid_max; + map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map), + GFP_KERNEL); + + /* + * Pairs with smp_load_acquire() in + * trace_find_tgid_ptr() to ensure that if it observes + * the tgid_map we just allocated then it also observes + * the corresponding tgid_map_max value. + */ + smp_store_release(&tgid_map, map); + } if (!tgid_map) { tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; return -ENOMEM; @@ -5619,37 +5687,16 @@ static const struct file_operations tracing_readme_fops = { static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) { - int *ptr = v; + int pid = ++(*pos); - if (*pos || m->count) - ptr++; - - (*pos)++; - - for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { - if (trace_find_tgid(*ptr)) - return ptr; - } - - return NULL; + return trace_find_tgid_ptr(pid); } static void *saved_tgids_start(struct seq_file *m, loff_t *pos) { - void *v; - loff_t l = 0; - - if (!tgid_map) - return NULL; - - v = &tgid_map[0]; - while (l <= *pos) { - v = saved_tgids_next(m, v, &l); - if (!v) - return NULL; - } + int pid = *pos; - return v; + return trace_find_tgid_ptr(pid); } static void saved_tgids_stop(struct seq_file *m, void *v) @@ -5658,9 +5705,14 @@ static void saved_tgids_stop(struct seq_file *m, void *v) static int saved_tgids_show(struct seq_file *m, void *v) { - int pid = (int *)v - tgid_map; + int *entry = (int *)v; + int pid = entry - tgid_map; + int tgid = *entry; - seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); + if (tgid == 0) + return SEQ_SKIP; + + seq_printf(m, "%d %d\n", pid, tgid); return 0; } @@ -6145,7 +6197,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id) { - int ret = size; + int ret; mutex_lock(&trace_types_lock); @@ -7539,6 +7591,91 @@ static const struct file_operations snapshot_raw_fops = { #endif /* CONFIG_TRACER_SNAPSHOT */ +/* + * trace_min_max_write - Write a u64 value to a trace_min_max_param struct + * @filp: The active open file structure + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function implements the write interface for a struct trace_min_max_param. 
+ * The filp->private_data must point to a trace_min_max_param structure that + * defines where to write the value, the min and the max acceptable values, + * and a lock to protect the write. + */ +static ssize_t +trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + struct trace_min_max_param *param = filp->private_data; + u64 val; + int err; + + if (!param) + return -EFAULT; + + err = kstrtoull_from_user(ubuf, cnt, 10, &val); + if (err) + return err; + + if (param->lock) + mutex_lock(param->lock); + + if (param->min && val < *param->min) + err = -EINVAL; + + if (param->max && val > *param->max) + err = -EINVAL; + + if (!err) + *param->val = val; + + if (param->lock) + mutex_unlock(param->lock); + + if (err) + return err; + + return cnt; +} + +/* + * trace_min_max_read - Read a u64 value from a trace_min_max_param struct + * @filp: The active open file structure + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function implements the read interface for a struct trace_min_max_param. + * The filp->private_data must point to a trace_min_max_param struct with valid + * data. + */ +static ssize_t +trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + struct trace_min_max_param *param = filp->private_data; + char buf[U64_STR_SIZE]; + int len; + u64 val; + + if (!param) + return -EFAULT; + + val = *param->val; + + if (cnt > sizeof(buf)) + cnt = sizeof(buf); + + len = snprintf(buf, sizeof(buf), "%llu\n", val); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +} + +const struct file_operations trace_min_max_fops = { + .open = tracing_open_generic, + .read = trace_min_max_read, + .write = trace_min_max_write, +}; + #define TRACING_LOG_ERRS_MAX 8 #define TRACING_LOG_LOC_MAX 128 @@ -9542,6 +9679,8 @@ static __init int tracer_init_tracefs(void) return 0; } +fs_initcall(tracer_init_tracefs); + static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { @@ -9962,7 +10101,7 @@ void __init trace_init(void) trace_event_init(); } -__init static int clear_boot_tracer(void) +__init static void clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. @@ -9972,26 +10111,21 @@ __init static int clear_boot_tracer(void) * about to be freed. 
*/ if (!default_bootup_tracer) - return 0; + return; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; - - return 0; } -fs_initcall(tracer_init_tracefs); -late_initcall_sync(clear_boot_tracer); - #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -__init static int tracing_set_default_clock(void) +__init static void tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Can not set tracing clock due to lockdown\n"); - return -EPERM; + return; } printk(KERN_WARNING @@ -10001,8 +10135,21 @@ __init static int tracing_set_default_clock(void) "on the kernel command line\n"); tracing_set_clock(&global_trace, "global"); } +} +#else +static inline void tracing_set_default_clock(void) { } +#endif + +__init static int late_trace_init(void) +{ + if (tracepoint_printk && tracepoint_printk_stop_on_boot) { + static_key_disable(&tracepoint_printk_key.key); + tracepoint_printk = 0; + } + tracing_set_default_clock(); + clear_boot_tracer(); return 0; } -late_initcall_sync(tracing_set_default_clock); -#endif + +late_initcall_sync(late_trace_init); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index cd80d046c7a5..a180abf76d4e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -20,6 +20,7 @@ #include <linux/irq_work.h> #include <linux/workqueue.h> #include <linux/ctype.h> +#include <linux/once_lite.h> #ifdef CONFIG_FTRACE_SYSCALLS #include <asm/unistd.h> /* For NR_SYSCALLS */ @@ -44,6 +45,8 @@ enum trace_type { TRACE_BLK, TRACE_BPUTS, TRACE_HWLAT, + TRACE_OSNOISE, + TRACE_TIMERLAT, TRACE_RAW_DATA, TRACE_FUNC_REPEATS, @@ -99,16 +102,8 @@ enum trace_type { #include "trace_entries.h" /* Use this for memory failure errors */ -#define MEM_FAIL(condition, fmt, ...) ({ \ - static bool __section(".data.once") __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once && !__warned)) { \ - __warned = true; \ - pr_err("ERROR: " fmt, ##__VA_ARGS__); \ - } \ - unlikely(__ret_warn_once); \ -}) +#define MEM_FAIL(condition, fmt, ...) 
\ + DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__) /* * syscalls are special, and need special handling, this is why @@ -297,7 +292,8 @@ struct trace_array { struct array_buffer max_buffer; bool allocated_snapshot; #endif -#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) +#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ + || defined(CONFIG_OSNOISE_TRACER) unsigned long max_latency; #ifdef CONFIG_FSNOTIFY struct dentry *d_max_latency; @@ -445,6 +441,8 @@ extern void __ftrace_bad_type(void); IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \ + IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\ + IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\ IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\ IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ TRACE_MMIO_RW); \ @@ -675,15 +673,15 @@ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu); #endif /* CONFIG_TRACER_MAX_TRACE */ -#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ - defined(CONFIG_FSNOTIFY) +#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ + || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY) +#define LATENCY_FS_NOTIFY +#endif +#ifdef LATENCY_FS_NOTIFY void latency_fsnotify(struct trace_array *tr); - #else - static inline void latency_fsnotify(struct trace_array *tr) { } - #endif #ifdef CONFIG_STACKTRACE @@ -1952,4 +1950,22 @@ static inline bool is_good_name(const char *name) return true; } +/* + * This is a generic way to read and write a u64 value from a file in tracefs. + * + * The value is stored on the variable pointed by *val. The value needs + * to be at least *min and at most *max. The write is protected by an + * existing *lock. 
+ */ +struct trace_min_max_param { + struct mutex *lock; + u64 *val; + u64 *min; + u64 *max; +}; + +#define U64_STR_SIZE 24 /* 20 digits max */ + +extern const struct file_operations trace_min_max_fops; + #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index a82f03f385f8..94ef2d099e32 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -225,14 +225,37 @@ static void __init trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) { struct xbc_node *gnode, *enode; + bool enable, enable_all = false; + const char *data; node = xbc_node_find_child(node, "event"); if (!node) return; /* per-event key starts with "event.GROUP.EVENT" */ - xbc_node_for_each_child(node, gnode) - xbc_node_for_each_child(gnode, enode) + xbc_node_for_each_child(node, gnode) { + data = xbc_node_get_data(gnode); + if (!strcmp(data, "enable")) { + enable_all = true; + continue; + } + enable = false; + xbc_node_for_each_child(gnode, enode) { + data = xbc_node_get_data(enode); + if (!strcmp(data, "enable")) { + enable = true; + continue; + } trace_boot_init_one_event(tr, gnode, enode); + } + /* Event enablement must be done after event settings */ + if (enable) { + data = xbc_node_get_data(gnode); + trace_array_set_clr_event(tr, data, NULL, true); + } + } + /* Ditto */ + if (enable_all) + trace_array_set_clr_event(tr, NULL, NULL, true); } #else #define trace_boot_enable_events(tr, node) do {} while (0) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index c1637f90c8a3..4702efb00ff2 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void) prev_time = READ_ONCE(trace_clock_struct.prev_time); now = sched_clock_cpu(this_cpu); - /* Make sure that now is always greater than prev_time */ + /* Make sure that now is always greater than or equal to prev_time */ if ((s64)(now - prev_time) < 0) - now = prev_time + 1; + now = prev_time; /* * If in an NMI context then dont risk lockups and simply return @@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void) /* Reread prev_time in case it was already updated */ prev_time = READ_ONCE(trace_clock_struct.prev_time); if ((s64)(now - prev_time) < 0) - now = prev_time + 1; + now = prev_time; trace_clock_struct.prev_time = now; diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 251c819cf0c5..cd41e863b51c 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -360,3 +360,44 @@ FTRACE_ENTRY(func_repeats, func_repeats_entry, __entry->count, FUNC_REPEATS_GET_DELTA_TS(__entry)) ); + +FTRACE_ENTRY(osnoise, osnoise_entry, + + TRACE_OSNOISE, + + F_STRUCT( + __field( u64, noise ) + __field( u64, runtime ) + __field( u64, max_sample ) + __field( unsigned int, hw_count ) + __field( unsigned int, nmi_count ) + __field( unsigned int, irq_count ) + __field( unsigned int, softirq_count ) + __field( unsigned int, thread_count ) + ), + + F_printk("noise:%llu\tmax_sample:%llu\thw:%u\tnmi:%u\tirq:%u\tsoftirq:%u\tthread:%u\n", + __entry->noise, + __entry->max_sample, + __entry->hw_count, + __entry->nmi_count, + __entry->irq_count, + __entry->softirq_count, + __entry->thread_count) +); + +FTRACE_ENTRY(timerlat, timerlat_entry, + + TRACE_TIMERLAT, + + F_STRUCT( + __field( unsigned int, seqnum ) + __field( int, context ) + __field( u64, timer_latency ) + ), + + F_printk("seq:%u\tcontext:%d\ttimer_latency:%llu\n", + __entry->seqnum, + __entry->context, + __entry->timer_latency) +); diff 
--git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index c1abd63f1d6c..ba03b7d84fc2 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2434,12 +2434,12 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data, char *subsys_name, char *event_name, char *field_name) { struct trace_array *tr = target_hist_data->event_file->tr; - struct hist_field *event_var = ERR_PTR(-EINVAL); struct hist_trigger_data *hist_data; unsigned int i, n, first = true; struct field_var_hist *var_hist; struct trace_event_file *file; struct hist_field *key_field; + struct hist_field *event_var; char *saved_filter; char *cmd; int ret; @@ -5232,6 +5232,7 @@ static void unregister_field_var_hists(struct hist_trigger_data *hist_data) cmd = hist_data->field_var_hists[i]->cmd; ret = event_hist_trigger_func(&trigger_hist_cmd, file, "!hist", "hist", cmd); + WARN_ON_ONCE(ret < 0); } } diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index b8bfa8505b7b..cf84d0f6583a 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -916,7 +916,8 @@ void unpause_named_trigger(struct event_trigger_data *data) /** * set_named_trigger_data - Associate common named trigger data - * @data: The trigger data of a named trigger to unpause + * @data: The trigger data to associate + * @named_data: The common named trigger to be associated * * Named triggers are sets of triggers that share a common set of * trigger data. The first named trigger registered with a given name diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 632ef88131a9..a6c0cdaf4b87 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -34,7 +34,7 @@ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com> * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. 
<srostedt@redhat.com> * - * Includes useful feedback from Clark Williams <clark@redhat.com> + * Includes useful feedback from Clark Williams <williams@redhat.com> * */ #include <linux/kthread.h> @@ -54,20 +54,33 @@ static struct trace_array *hwlat_trace; #define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ #define DEFAULT_LAT_THRESHOLD 10 /* 10us */ -/* sampling thread*/ -static struct task_struct *hwlat_kthread; - static struct dentry *hwlat_sample_width; /* sample width us */ static struct dentry *hwlat_sample_window; /* sample window us */ +static struct dentry *hwlat_thread_mode; /* hwlat thread mode */ + +enum { + MODE_NONE = 0, + MODE_ROUND_ROBIN, + MODE_PER_CPU, + MODE_MAX +}; +static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" }; /* Save the previous tracing_thresh value */ static unsigned long save_tracing_thresh; -/* NMI timestamp counters */ -static u64 nmi_ts_start; -static u64 nmi_total_ts; -static int nmi_count; -static int nmi_cpu; +/* runtime kthread data */ +struct hwlat_kthread_data { + struct task_struct *kthread; + /* NMI timestamp counters */ + u64 nmi_ts_start; + u64 nmi_total_ts; + int nmi_count; + int nmi_cpu; +}; + +struct hwlat_kthread_data hwlat_single_cpu_data; +DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data); /* Tells NMIs to call back to the hwlat tracer to record timestamps */ bool trace_hwlat_callback_enabled; @@ -96,11 +109,24 @@ static struct hwlat_data { u64 sample_window; /* total sampling window (on+off) */ u64 sample_width; /* active sampling portion of window */ + int thread_mode; /* thread mode */ + } hwlat_data = { .sample_window = DEFAULT_SAMPLE_WINDOW, .sample_width = DEFAULT_SAMPLE_WIDTH, + .thread_mode = MODE_ROUND_ROBIN }; +static struct hwlat_kthread_data *get_cpu_data(void) +{ + if (hwlat_data.thread_mode == MODE_PER_CPU) + return this_cpu_ptr(&hwlat_per_cpu_data); + else + return &hwlat_single_cpu_data; +} + +static bool hwlat_busy; + static void trace_hwlat_sample(struct hwlat_sample *sample) { struct trace_array *tr = hwlat_trace; @@ -136,7 +162,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample) void trace_hwlat_callback(bool enter) { - if (smp_processor_id() != nmi_cpu) + struct hwlat_kthread_data *kdata = get_cpu_data(); + + if (!kdata->kthread) return; /* @@ -145,15 +173,24 @@ void trace_hwlat_callback(bool enter) */ if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) { if (enter) - nmi_ts_start = time_get(); + kdata->nmi_ts_start = time_get(); else - nmi_total_ts += time_get() - nmi_ts_start; + kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start; } if (enter) - nmi_count++; + kdata->nmi_count++; } +/* + * hwlat_err - report a hwlat error. 
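For illustration only: the hunk above replaces the single set of NMI counters with a struct hwlat_kthread_data that is either shared (none/round-robin modes) or per CPU (per-cpu mode), selected by get_cpu_data(). A minimal user-space sketch of that selection idea follows; pick_data(), struct sample_data and MAX_CPUS are made-up names, not kernel API, and the mode values simply mirror the enum introduced by the patch.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 64	/* toy bound for the sketch */

enum sample_mode { MODE_NONE, MODE_ROUND_ROBIN, MODE_PER_CPU };

struct sample_data {
	unsigned long long nmi_total_ns;
	int nmi_count;
};

static struct sample_data single_data;			/* shared: none / round-robin */
static struct sample_data per_cpu_data[MAX_CPUS];	/* one slot per CPU: per-cpu mode */
static enum sample_mode mode = MODE_ROUND_ROBIN;

/* Mirrors get_cpu_data(): per-CPU storage is used only in per-cpu mode. */
static struct sample_data *pick_data(void)
{
	if (mode == MODE_PER_CPU)
		return &per_cpu_data[sched_getcpu() % MAX_CPUS];
	return &single_data;
}

int main(void)
{
	struct sample_data *d = pick_data();

	d->nmi_count++;
	printf("mode=%d cpu=%d nmi_count=%d\n", mode, sched_getcpu(), d->nmi_count);
	return 0;
}
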
+ */ +#define hwlat_err(msg) ({ \ + struct trace_array *tr = hwlat_trace; \ + \ + trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \ +}) + /** * get_sample - sample the CPU TSC and look for likely hardware latencies * @@ -163,6 +200,7 @@ void trace_hwlat_callback(bool enter) */ static int get_sample(void) { + struct hwlat_kthread_data *kdata = get_cpu_data(); struct trace_array *tr = hwlat_trace; struct hwlat_sample s; time_type start, t1, t2, last_t2; @@ -175,9 +213,8 @@ static int get_sample(void) do_div(thresh, NSEC_PER_USEC); /* modifies interval value */ - nmi_cpu = smp_processor_id(); - nmi_total_ts = 0; - nmi_count = 0; + kdata->nmi_total_ts = 0; + kdata->nmi_count = 0; /* Make sure NMIs see this first */ barrier(); @@ -197,7 +234,7 @@ static int get_sample(void) outer_diff = time_to_us(time_sub(t1, last_t2)); /* This shouldn't happen */ if (outer_diff < 0) { - pr_err(BANNER "time running backwards\n"); + hwlat_err(BANNER "time running backwards\n"); goto out; } if (outer_diff > outer_sample) @@ -209,7 +246,7 @@ static int get_sample(void) /* Check for possible overflows */ if (total < last_total) { - pr_err("Time total overflowed\n"); + hwlat_err("Time total overflowed\n"); break; } last_total = total; @@ -225,7 +262,7 @@ static int get_sample(void) /* This shouldn't happen */ if (diff < 0) { - pr_err(BANNER "time running backwards\n"); + hwlat_err(BANNER "time running backwards\n"); goto out; } @@ -247,15 +284,15 @@ static int get_sample(void) ret = 1; /* We read in microseconds */ - if (nmi_total_ts) - do_div(nmi_total_ts, NSEC_PER_USEC); + if (kdata->nmi_total_ts) + do_div(kdata->nmi_total_ts, NSEC_PER_USEC); hwlat_data.count++; s.seqnum = hwlat_data.count; s.duration = sample; s.outer_duration = outer_sample; - s.nmi_total_ts = nmi_total_ts; - s.nmi_count = nmi_count; + s.nmi_total_ts = kdata->nmi_total_ts; + s.nmi_count = kdata->nmi_count; s.count = count; trace_hwlat_sample(&s); @@ -273,7 +310,6 @@ out: } static struct cpumask save_cpumask; -static bool disable_migrate; static void move_to_next_cpu(void) { @@ -281,15 +317,13 @@ static void move_to_next_cpu(void) struct trace_array *tr = hwlat_trace; int next_cpu; - if (disable_migrate) - return; /* * If for some reason the user modifies the CPU affinity * of this thread, then stop migrating for the duration * of the current test. */ if (!cpumask_equal(current_mask, current->cpus_ptr)) - goto disable; + goto change_mode; get_online_cpus(); cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); @@ -300,7 +334,7 @@ static void move_to_next_cpu(void) next_cpu = cpumask_first(current_mask); if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! 
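As a side note on move_to_next_cpu() above: the single hwlatd thread walks the allowed CPUs with cpumask_next() and wraps back to cpumask_first() when it runs past nr_cpu_ids. A small user-space sketch of that wrap-around walk over a bitmask; next_allowed_cpu() is a hypothetical helper written for this sketch, not a kernel function.

#include <stdio.h>

#define NR_CPUS 8

/* Return the next set bit after 'cur', wrapping to the first set bit. */
static int next_allowed_cpu(unsigned int allowed_mask, int cur)
{
	int cpu;

	for (cpu = cur + 1; cpu < NR_CPUS; cpu++)
		if (allowed_mask & (1u << cpu))
			return cpu;
	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* wrap, like cpumask_first() */
		if (allowed_mask & (1u << cpu))
			return cpu;
	return -1;				/* empty mask: "shouldn't happen" */
}

int main(void)
{
	unsigned int allowed = 0x2C;	/* CPUs 2, 3 and 5 */
	int cpu = 2;

	for (int i = 0; i < 6; i++) {
		cpu = next_allowed_cpu(allowed, cpu);
		printf("sample window %d runs on CPU %d\n", i, cpu);
	}
	return 0;	/* visits CPUs 3, 5, 2, 3, 5, 2 */
}
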
*/ - goto disable; + goto change_mode; cpumask_clear(current_mask); cpumask_set_cpu(next_cpu, current_mask); @@ -308,8 +342,9 @@ static void move_to_next_cpu(void) sched_setaffinity(0, current_mask); return; - disable: - disable_migrate = true; + change_mode: + hwlat_data.thread_mode = MODE_NONE; + pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n"); } /* @@ -328,7 +363,8 @@ static int kthread_fn(void *data) while (!kthread_should_stop()) { - move_to_next_cpu(); + if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) + move_to_next_cpu(); local_irq_disable(); get_sample(); @@ -351,178 +387,380 @@ static int kthread_fn(void *data) return 0; } -/** - * start_kthread - Kick off the hardware latency sampling/detector kthread +/* + * stop_stop_kthread - Inform the hardware latency sampling/detector kthread to stop + * + * This kicks the running hardware latency sampling/detector kernel thread and + * tells it to stop sampling now. Use this on unload and at system shutdown. + */ +static void stop_single_kthread(void) +{ + struct hwlat_kthread_data *kdata = get_cpu_data(); + struct task_struct *kthread; + + get_online_cpus(); + kthread = kdata->kthread; + + if (!kthread) + goto out_put_cpus; + + kthread_stop(kthread); + kdata->kthread = NULL; + +out_put_cpus: + put_online_cpus(); +} + + +/* + * start_single_kthread - Kick off the hardware latency sampling/detector kthread * * This starts the kernel thread that will sit and sample the CPU timestamp * counter (TSC or similar) and look for potential hardware latencies. */ -static int start_kthread(struct trace_array *tr) +static int start_single_kthread(struct trace_array *tr) { + struct hwlat_kthread_data *kdata = get_cpu_data(); struct cpumask *current_mask = &save_cpumask; struct task_struct *kthread; int next_cpu; - if (hwlat_kthread) - return 0; - - /* Just pick the first CPU on first iteration */ get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); - put_online_cpus(); - next_cpu = cpumask_first(current_mask); + if (kdata->kthread) + goto out_put_cpus; kthread = kthread_create(kthread_fn, NULL, "hwlatd"); if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); + put_online_cpus(); return -ENOMEM; } - cpumask_clear(current_mask); - cpumask_set_cpu(next_cpu, current_mask); + /* Just pick the first CPU on first iteration */ + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); + + if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) { + next_cpu = cpumask_first(current_mask); + cpumask_clear(current_mask); + cpumask_set_cpu(next_cpu, current_mask); + + } + sched_setaffinity(kthread->pid, current_mask); - hwlat_kthread = kthread; + kdata->kthread = kthread; wake_up_process(kthread); +out_put_cpus: + put_online_cpus(); return 0; } -/** - * stop_kthread - Inform the hardware latency sampling/detector kthread to stop +/* + * stop_cpu_kthread - Stop a hwlat cpu kthread + */ +static void stop_cpu_kthread(unsigned int cpu) +{ + struct task_struct *kthread; + + kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread; + if (kthread) + kthread_stop(kthread); + per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL; +} + +/* + * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthread to stop * - * This kicks the running hardware latency sampling/detector kernel thread and + * This kicks the running hardware latency sampling/detector kernel threads and * tells it to stop sampling now. Use this on unload and at system shutdown. 
*/ -static void stop_kthread(void) +static void stop_per_cpu_kthreads(void) { - if (!hwlat_kthread) - return; - kthread_stop(hwlat_kthread); - hwlat_kthread = NULL; + unsigned int cpu; + + get_online_cpus(); + for_each_online_cpu(cpu) + stop_cpu_kthread(cpu); + put_online_cpus(); } /* - * hwlat_read - Wrapper read function for reading both window and width - * @filp: The active open file structure - * @ubuf: The userspace provided buffer to read value into - * @cnt: The maximum number of bytes to read - * @ppos: The current "file" position - * - * This function provides a generic read implementation for the global state - * "hwlat_data" structure filesystem entries. + * start_cpu_kthread - Start a hwlat cpu kthread */ -static ssize_t hwlat_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) +static int start_cpu_kthread(unsigned int cpu) { - char buf[U64STR_SIZE]; - u64 *entry = filp->private_data; - u64 val; - int len; + struct task_struct *kthread; + char comm[24]; - if (!entry) - return -EFAULT; + snprintf(comm, 24, "hwlatd/%d", cpu); + + kthread = kthread_create_on_cpu(kthread_fn, NULL, cpu, comm); + if (IS_ERR(kthread)) { + pr_err(BANNER "could not start sampling thread\n"); + return -ENOMEM; + } - if (cnt > sizeof(buf)) - cnt = sizeof(buf); + per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread; + wake_up_process(kthread); + + return 0; +} + +#ifdef CONFIG_HOTPLUG_CPU +static void hwlat_hotplug_workfn(struct work_struct *dummy) +{ + struct trace_array *tr = hwlat_trace; + unsigned int cpu = smp_processor_id(); + + mutex_lock(&trace_types_lock); + mutex_lock(&hwlat_data.lock); + get_online_cpus(); + + if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU) + goto out_unlock; - val = *entry; + if (!cpumask_test_cpu(cpu, tr->tracing_cpumask)) + goto out_unlock; - len = snprintf(buf, sizeof(buf), "%llu\n", val); + start_cpu_kthread(cpu); - return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +out_unlock: + put_online_cpus(); + mutex_unlock(&hwlat_data.lock); + mutex_unlock(&trace_types_lock); } -/** - * hwlat_width_write - Write function for "width" entry - * @filp: The active open file structure - * @ubuf: The user buffer that contains the value to write - * @cnt: The maximum number of bytes to write to "file" - * @ppos: The current position in @file +static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn); + +/* + * hwlat_cpu_init - CPU hotplug online callback function + */ +static int hwlat_cpu_init(unsigned int cpu) +{ + schedule_work_on(cpu, &hwlat_hotplug_work); + return 0; +} + +/* + * hwlat_cpu_die - CPU hotplug offline callback function + */ +static int hwlat_cpu_die(unsigned int cpu) +{ + stop_cpu_kthread(cpu); + return 0; +} + +static void hwlat_init_hotplug_support(void) +{ + int ret; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online", + hwlat_cpu_init, hwlat_cpu_die); + if (ret < 0) + pr_warn(BANNER "Error to init cpu hotplug support\n"); + + return; +} +#else /* CONFIG_HOTPLUG_CPU */ +static void hwlat_init_hotplug_support(void) +{ + return; +} +#endif /* CONFIG_HOTPLUG_CPU */ + +/* + * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads * - * This function provides a write implementation for the "width" interface - * to the hardware latency detector. It can be used to configure - * for how many us of the total window us we will actively sample for any - * hardware-induced latency periods. 
Obviously, it is not possible to - * sample constantly and have the system respond to a sample reader, or, - * worse, without having the system appear to have gone out to lunch. It - * is enforced that width is less that the total window size. + * This starts the kernel threads that will sit on potentially all cpus and + * sample the CPU timestamp counter (TSC or similar) and look for potential + * hardware latencies. */ -static ssize_t -hwlat_width_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos) +static int start_per_cpu_kthreads(struct trace_array *tr) { - u64 val; - int err; + struct cpumask *current_mask = &save_cpumask; + unsigned int cpu; + int retval; - err = kstrtoull_from_user(ubuf, cnt, 10, &val); - if (err) - return err; + get_online_cpus(); + /* + * Run only on CPUs in which hwlat is allowed to run. + */ + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); + + for_each_online_cpu(cpu) + per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL; + + for_each_cpu(cpu, current_mask) { + retval = start_cpu_kthread(cpu); + if (retval) + goto out_error; + } + put_online_cpus(); + + return 0; + +out_error: + put_online_cpus(); + stop_per_cpu_kthreads(); + return retval; +} + +static void *s_mode_start(struct seq_file *s, loff_t *pos) +{ + int mode = *pos; mutex_lock(&hwlat_data.lock); - if (val < hwlat_data.sample_window) - hwlat_data.sample_width = val; + + if (mode >= MODE_MAX) + return NULL; + + return pos; +} + +static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos) +{ + int mode = ++(*pos); + + if (mode >= MODE_MAX) + return NULL; + + return pos; +} + +static int s_mode_show(struct seq_file *s, void *v) +{ + loff_t *pos = v; + int mode = *pos; + + if (mode == hwlat_data.thread_mode) + seq_printf(s, "[%s]", thread_mode_str[mode]); else - err = -EINVAL; - mutex_unlock(&hwlat_data.lock); + seq_printf(s, "%s", thread_mode_str[mode]); - if (err) - return err; + if (mode != MODE_MAX) + seq_puts(s, " "); + + return 0; +} - return cnt; +static void s_mode_stop(struct seq_file *s, void *v) +{ + seq_puts(s, "\n"); + mutex_unlock(&hwlat_data.lock); } +static const struct seq_operations thread_mode_seq_ops = { + .start = s_mode_start, + .next = s_mode_next, + .show = s_mode_show, + .stop = s_mode_stop +}; + +static int hwlat_mode_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &thread_mode_seq_ops); +}; + +static void hwlat_tracer_start(struct trace_array *tr); +static void hwlat_tracer_stop(struct trace_array *tr); + /** - * hwlat_window_write - Write function for "window" entry + * hwlat_mode_write - Write function for "mode" entry * @filp: The active open file structure * @ubuf: The user buffer that contains the value to write * @cnt: The maximum number of bytes to write to "file" * @ppos: The current position in @file * - * This function provides a write implementation for the "window" interface - * to the hardware latency detector. The window is the total time - * in us that will be considered one sample period. Conceptually, windows - * occur back-to-back and contain a sample width period during which - * actual sampling occurs. Can be used to write a new total window size. It - * is enforced that any value written must be greater than the sample width - * size, or an error results. + * This function provides a write implementation for the "mode" interface + * to the hardware latency detector. hwlatd has different operation modes. 
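The individual thread modes are described in the remainder of this comment. As a hedged usage sketch of the new "mode" file: the paths below assume tracefs is mounted at /sys/kernel/tracing and that the hwlat files live under hwlat_detector/, which may differ on a given system; adjust as needed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MODE_PATH "/sys/kernel/tracing/hwlat_detector/mode"

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd;

	fd = open(MODE_PATH, O_RDWR);
	if (fd < 0) {
		perror("open " MODE_PATH);
		return 1;
	}

	/* e.g. "none [round-robin] per-cpu" -- brackets mark the active mode */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("available modes: %s", buf);
	}

	/* select one hwlatd thread per allowed CPU */
	if (write(fd, "per-cpu", strlen("per-cpu")) < 0)
		perror("write mode");

	close(fd);
	return 0;
}
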
+ * The "none" sets the allowed cpumask for a single hwlatd thread at the + * startup and lets the scheduler handle the migration. The default mode is + * the "round-robin" one, in which a single hwlatd thread runs, migrating + * among the allowed CPUs in a round-robin fashion. The "per-cpu" mode + * creates one hwlatd thread per allowed CPU. */ -static ssize_t -hwlat_window_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos) +static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) { - u64 val; - int err; + struct trace_array *tr = hwlat_trace; + const char *mode; + char buf[64]; + int ret, i; - err = kstrtoull_from_user(ubuf, cnt, 10, &val); - if (err) - return err; + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + mode = strstrip(buf); + + ret = -EINVAL; + + /* + * trace_types_lock is taken to avoid concurrency on start/stop + * and hwlat_busy. + */ + mutex_lock(&trace_types_lock); + if (hwlat_busy) + hwlat_tracer_stop(tr); mutex_lock(&hwlat_data.lock); - if (hwlat_data.sample_width < val) - hwlat_data.sample_window = val; - else - err = -EINVAL; + + for (i = 0; i < MODE_MAX; i++) { + if (strcmp(mode, thread_mode_str[i]) == 0) { + hwlat_data.thread_mode = i; + ret = cnt; + } + } + mutex_unlock(&hwlat_data.lock); - if (err) - return err; + if (hwlat_busy) + hwlat_tracer_start(tr); + mutex_unlock(&trace_types_lock); + + *ppos += cnt; - return cnt; + + + return ret; } -static const struct file_operations width_fops = { - .open = tracing_open_generic, - .read = hwlat_read, - .write = hwlat_width_write, +/* + * The width parameter is read/write using the generic trace_min_max_param + * method. The *val is protected by the hwlat_data lock and is upper + * bounded by the window parameter. + */ +static struct trace_min_max_param hwlat_width = { + .lock = &hwlat_data.lock, + .val = &hwlat_data.sample_width, + .max = &hwlat_data.sample_window, + .min = NULL, }; -static const struct file_operations window_fops = { - .open = tracing_open_generic, - .read = hwlat_read, - .write = hwlat_window_write, +/* + * The window parameter is read/write using the generic trace_min_max_param + * method. The *val is protected by the hwlat_data lock and is lower + * bounded by the width parameter. 
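With trace_min_max_param, "width" and "window" are now served by the generic trace_min_max_fops, each value bounded by the other under hwlat_data.lock so that width never exceeds window. The following is only a sketch of that cross-bounded setter semantics in user-space C, with a pthread mutex standing in for the kernel mutex; param_write() and the struct name are illustrative, not the kernel implementation.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct min_max_param {
	pthread_mutex_t *lock;
	uint64_t *val;
	uint64_t *min;	/* may be NULL: no lower bound */
	uint64_t *max;	/* may be NULL: no upper bound */
};

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sample_window = 1000000;	/* 1s, like DEFAULT_SAMPLE_WINDOW */
static uint64_t sample_width  = 500000;		/* 0.5s, like DEFAULT_SAMPLE_WIDTH */

/* width must stay <= window, window must stay >= width */
static struct min_max_param width_param  = { &data_lock, &sample_width,  NULL, &sample_window };
static struct min_max_param window_param = { &data_lock, &sample_window, &sample_width, NULL };

static int param_write(struct min_max_param *p, uint64_t new_val)
{
	int ret = 0;

	pthread_mutex_lock(p->lock);
	if ((p->min && new_val < *p->min) || (p->max && new_val > *p->max))
		ret = -1;		/* would break width <= window */
	else
		*p->val = new_val;
	pthread_mutex_unlock(p->lock);
	return ret;
}

int main(void)
{
	printf("width=2s:   %s\n", param_write(&width_param, 2000000) ? "rejected" : "accepted");
	printf("window=2s:  %s\n", param_write(&window_param, 2000000) ? "rejected" : "accepted");
	printf("width=1.5s: %s\n", param_write(&width_param, 1500000) ? "rejected" : "accepted");
	return 0;
}
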
+ */ +static struct trace_min_max_param hwlat_window = { + .lock = &hwlat_data.lock, + .val = &hwlat_data.sample_window, + .max = NULL, + .min = &hwlat_data.sample_width, }; +static const struct file_operations thread_mode_fops = { + .open = hwlat_mode_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = hwlat_mode_write +}; /** * init_tracefs - A function to initialize the tracefs interface files * @@ -546,18 +784,25 @@ static int init_tracefs(void) hwlat_sample_window = tracefs_create_file("window", 0640, top_dir, - &hwlat_data.sample_window, - &window_fops); + &hwlat_window, + &trace_min_max_fops); if (!hwlat_sample_window) goto err; hwlat_sample_width = tracefs_create_file("width", 0644, top_dir, - &hwlat_data.sample_width, - &width_fops); + &hwlat_width, + &trace_min_max_fops); if (!hwlat_sample_width) goto err; + hwlat_thread_mode = trace_create_file("mode", 0644, + top_dir, + NULL, + &thread_mode_fops); + if (!hwlat_thread_mode) + goto err; + return 0; err: @@ -569,18 +814,22 @@ static void hwlat_tracer_start(struct trace_array *tr) { int err; - err = start_kthread(tr); + if (hwlat_data.thread_mode == MODE_PER_CPU) + err = start_per_cpu_kthreads(tr); + else + err = start_single_kthread(tr); if (err) pr_err(BANNER "Cannot start hwlat kthread\n"); } static void hwlat_tracer_stop(struct trace_array *tr) { - stop_kthread(); + if (hwlat_data.thread_mode == MODE_PER_CPU) + stop_per_cpu_kthreads(); + else + stop_single_kthread(); } -static bool hwlat_busy; - static int hwlat_tracer_init(struct trace_array *tr) { /* Only allow one instance to enable this */ @@ -589,7 +838,6 @@ static int hwlat_tracer_init(struct trace_array *tr) hwlat_trace = tr; - disable_migrate = false; hwlat_data.count = 0; tr->max_latency = 0; save_tracing_thresh = tracing_thresh; @@ -608,7 +856,7 @@ static int hwlat_tracer_init(struct trace_array *tr) static void hwlat_tracer_reset(struct trace_array *tr) { - stop_kthread(); + hwlat_tracer_stop(tr); /* the tracing threshold is static between runs */ last_tracing_thresh = tracing_thresh; @@ -637,6 +885,8 @@ __init static int init_hwlat_tracer(void) if (ret) return ret; + hwlat_init_hotplug_support(); + init_tracefs(); return 0; diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c new file mode 100644 index 000000000000..a7e3c24dee13 --- /dev/null +++ b/kernel/trace/trace_osnoise.c @@ -0,0 +1,2059 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OS Noise Tracer: computes the OS Noise suffered by a running thread. + * Timerlat Tracer: measures the wakeup latency of a timer triggered IRQ and thread. + * + * Based on "hwlat_detector" tracer by: + * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com> + * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com> + * With feedback from Clark Williams <williams@redhat.com> + * + * And also based on the rtsl tracer presented on: + * DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux + * scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems + * (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020. + * + * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. 
<bristot@redhat.com> + */ + +#include <linux/kthread.h> +#include <linux/tracefs.h> +#include <linux/uaccess.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/sched/clock.h> +#include <uapi/linux/sched/types.h> +#include <linux/sched.h> +#include "trace.h" + +#ifdef CONFIG_X86_LOCAL_APIC +#include <asm/trace/irq_vectors.h> +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#endif /* CONFIG_X86_LOCAL_APIC */ + +#include <trace/events/irq.h> +#include <trace/events/sched.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/osnoise.h> + +static struct trace_array *osnoise_trace; + +/* + * Default values. + */ +#define BANNER "osnoise: " +#define DEFAULT_SAMPLE_PERIOD 1000000 /* 1s */ +#define DEFAULT_SAMPLE_RUNTIME 1000000 /* 1s */ + +#define DEFAULT_TIMERLAT_PERIOD 1000 /* 1ms */ +#define DEFAULT_TIMERLAT_PRIO 95 /* FIFO 95 */ + +/* + * NMI runtime info. + */ +struct osn_nmi { + u64 count; + u64 delta_start; +}; + +/* + * IRQ runtime info. + */ +struct osn_irq { + u64 count; + u64 arrival_time; + u64 delta_start; +}; + +#define IRQ_CONTEXT 0 +#define THREAD_CONTEXT 1 +/* + * sofirq runtime info. + */ +struct osn_softirq { + u64 count; + u64 arrival_time; + u64 delta_start; +}; + +/* + * thread runtime info. + */ +struct osn_thread { + u64 count; + u64 arrival_time; + u64 delta_start; +}; + +/* + * Runtime information: this structure saves the runtime information used by + * one sampling thread. + */ +struct osnoise_variables { + struct task_struct *kthread; + bool sampling; + pid_t pid; + struct osn_nmi nmi; + struct osn_irq irq; + struct osn_softirq softirq; + struct osn_thread thread; + local_t int_counter; +}; + +/* + * Per-cpu runtime information. + */ +DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var); + +/* + * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU + */ +static inline struct osnoise_variables *this_cpu_osn_var(void) +{ + return this_cpu_ptr(&per_cpu_osnoise_var); +} + +#ifdef CONFIG_TIMERLAT_TRACER +/* + * Runtime information for the timer mode. + */ +struct timerlat_variables { + struct task_struct *kthread; + struct hrtimer timer; + u64 rel_period; + u64 abs_period; + bool tracing_thread; + u64 count; +}; + +DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var); + +/* + * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU + */ +static inline struct timerlat_variables *this_cpu_tmr_var(void) +{ + return this_cpu_ptr(&per_cpu_timerlat_var); +} + +/* + * tlat_var_reset - Reset the values of the given timerlat_variables + */ +static inline void tlat_var_reset(void) +{ + struct timerlat_variables *tlat_var; + int cpu; + /* + * So far, all the values are initialized as 0, so + * zeroing the structure is perfect. + */ + for_each_cpu(cpu, cpu_online_mask) { + tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); + memset(tlat_var, 0, sizeof(*tlat_var)); + } +} +#else /* CONFIG_TIMERLAT_TRACER */ +#define tlat_var_reset() do {} while (0) +#endif /* CONFIG_TIMERLAT_TRACER */ + +/* + * osn_var_reset - Reset the values of the given osnoise_variables + */ +static inline void osn_var_reset(void) +{ + struct osnoise_variables *osn_var; + int cpu; + + /* + * So far, all the values are initialized as 0, so + * zeroing the structure is perfect. 
+ */ + for_each_cpu(cpu, cpu_online_mask) { + osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu); + memset(osn_var, 0, sizeof(*osn_var)); + } +} + +/* + * osn_var_reset_all - Reset the value of all per-cpu osnoise_variables + */ +static inline void osn_var_reset_all(void) +{ + osn_var_reset(); + tlat_var_reset(); +} + +/* + * Tells NMIs to call back to the osnoise tracer to record timestamps. + */ +bool trace_osnoise_callback_enabled; + +/* + * osnoise sample structure definition. Used to store the statistics of a + * sample run. + */ +struct osnoise_sample { + u64 runtime; /* runtime */ + u64 noise; /* noise */ + u64 max_sample; /* max single noise sample */ + int hw_count; /* # HW (incl. hypervisor) interference */ + int nmi_count; /* # NMIs during this sample */ + int irq_count; /* # IRQs during this sample */ + int softirq_count; /* # softirqs during this sample */ + int thread_count; /* # threads during this sample */ +}; + +#ifdef CONFIG_TIMERLAT_TRACER +/* + * timerlat sample structure definition. Used to store the statistics of + * a sample run. + */ +struct timerlat_sample { + u64 timer_latency; /* timer_latency */ + unsigned int seqnum; /* unique sequence */ + int context; /* timer context */ +}; +#endif + +/* + * Protect the interface. + */ +struct mutex interface_lock; + +/* + * Tracer data. + */ +static struct osnoise_data { + u64 sample_period; /* total sampling period */ + u64 sample_runtime; /* active sampling portion of period */ + u64 stop_tracing; /* stop trace in the internal operation (loop/irq) */ + u64 stop_tracing_total; /* stop trace in the final operation (report/thread) */ +#ifdef CONFIG_TIMERLAT_TRACER + u64 timerlat_period; /* timerlat period */ + u64 print_stack; /* print IRQ stack if total > */ + int timerlat_tracer; /* timerlat tracer */ +#endif + bool tainted; /* infor users and developers about a problem */ +} osnoise_data = { + .sample_period = DEFAULT_SAMPLE_PERIOD, + .sample_runtime = DEFAULT_SAMPLE_RUNTIME, + .stop_tracing = 0, + .stop_tracing_total = 0, +#ifdef CONFIG_TIMERLAT_TRACER + .print_stack = 0, + .timerlat_period = DEFAULT_TIMERLAT_PERIOD, + .timerlat_tracer = 0, +#endif +}; + +/* + * Boolean variable used to inform that the tracer is currently sampling. + */ +static bool osnoise_busy; + +/* + * Print the osnoise header info. + */ +static void print_osnoise_headers(struct seq_file *s) +{ + if (osnoise_data.tainted) + seq_puts(s, "# osnoise is tainted!\n"); + + seq_puts(s, "# _-----=> irqs-off\n"); + seq_puts(s, "# / _----=> need-resched\n"); + seq_puts(s, "# | / _---=> hardirq/softirq\n"); + seq_puts(s, "# || / _--=> preempt-depth "); + seq_puts(s, " MAX\n"); + + seq_puts(s, "# || / "); + seq_puts(s, " SINGLE Interference counters:\n"); + + seq_puts(s, "# |||| RUNTIME "); + seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n"); + + seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP IN US "); + seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n"); + + seq_puts(s, "# | | | |||| | | "); + seq_puts(s, " | | | | | | | |\n"); +} + +/* + * osnoise_taint - report an osnoise error. + */ +#define osnoise_taint(msg) ({ \ + struct trace_array *tr = osnoise_trace; \ + \ + trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \ + osnoise_data.tainted = true; \ +}) + +/* + * Record an osnoise_sample into the tracer buffer. 
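print_osnoise_headers() above advertises a "% OF CPU AVAILABLE" column next to the runtime and noise columns. A hedged sketch of how that figure can be derived from one sample, available ≈ (runtime - noise) / runtime; the exact rounding and formatting used by the tracer's print path may differ, and the struct name here is only for the sketch.

#include <stdio.h>
#include <stdint.h>

struct osnoise_sample_us {
	uint64_t runtime;	/* observed window, in us */
	uint64_t noise;		/* summed noise above threshold, in us */
	uint64_t max_sample;	/* largest single noise occurrence, in us */
};

static void report(const struct osnoise_sample_us *s)
{
	/* percentage with 5 decimal places, computed in integer math */
	uint64_t avail = ((s->runtime - s->noise) * 10000000ULL) / s->runtime;

	printf("runtime %llu us, noise %llu us, %llu.%05llu%% CPU available, max single %llu us\n",
	       (unsigned long long)s->runtime, (unsigned long long)s->noise,
	       (unsigned long long)(avail / 100000), (unsigned long long)(avail % 100000),
	       (unsigned long long)s->max_sample);
}

int main(void)
{
	struct osnoise_sample_us s = { .runtime = 1000000, .noise = 23, .max_sample = 9 };

	report(&s);	/* 1s window with 23us of noise -> 99.99770% available */
	return 0;
}
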
+ */ +static void trace_osnoise_sample(struct osnoise_sample *sample) +{ + struct trace_array *tr = osnoise_trace; + struct trace_buffer *buffer = tr->array_buffer.buffer; + struct trace_event_call *call = &event_osnoise; + struct ring_buffer_event *event; + struct osnoise_entry *entry; + + event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry), + tracing_gen_ctx()); + if (!event) + return; + entry = ring_buffer_event_data(event); + entry->runtime = sample->runtime; + entry->noise = sample->noise; + entry->max_sample = sample->max_sample; + entry->hw_count = sample->hw_count; + entry->nmi_count = sample->nmi_count; + entry->irq_count = sample->irq_count; + entry->softirq_count = sample->softirq_count; + entry->thread_count = sample->thread_count; + + if (!call_filter_check_discard(call, entry, buffer, event)) + trace_buffer_unlock_commit_nostack(buffer, event); +} + +#ifdef CONFIG_TIMERLAT_TRACER +/* + * Print the timerlat header info. + */ +static void print_timerlat_headers(struct seq_file *s) +{ + seq_puts(s, "# _-----=> irqs-off\n"); + seq_puts(s, "# / _----=> need-resched\n"); + seq_puts(s, "# | / _---=> hardirq/softirq\n"); + seq_puts(s, "# || / _--=> preempt-depth\n"); + seq_puts(s, "# || /\n"); + seq_puts(s, "# |||| ACTIVATION\n"); + seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP ID "); + seq_puts(s, " CONTEXT LATENCY\n"); + seq_puts(s, "# | | | |||| | | "); + seq_puts(s, " | |\n"); +} + +/* + * Record an timerlat_sample into the tracer buffer. + */ +static void trace_timerlat_sample(struct timerlat_sample *sample) +{ + struct trace_array *tr = osnoise_trace; + struct trace_event_call *call = &event_osnoise; + struct trace_buffer *buffer = tr->array_buffer.buffer; + struct ring_buffer_event *event; + struct timerlat_entry *entry; + + event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry), + tracing_gen_ctx()); + if (!event) + return; + entry = ring_buffer_event_data(event); + entry->seqnum = sample->seqnum; + entry->context = sample->context; + entry->timer_latency = sample->timer_latency; + + if (!call_filter_check_discard(call, entry, buffer, event)) + trace_buffer_unlock_commit_nostack(buffer, event); +} + +#ifdef CONFIG_STACKTRACE + +#define MAX_CALLS 256 + +/* + * Stack trace will take place only at IRQ level, so, no need + * to control nesting here. + */ +struct trace_stack { + int stack_size; + int nr_entries; + unsigned long calls[MAX_CALLS]; +}; + +static DEFINE_PER_CPU(struct trace_stack, trace_stack); + +/* + * timerlat_save_stack - save a stack trace without printing + * + * Save the current stack trace without printing. The + * stack will be printed later, after the end of the measurement. + */ +static void timerlat_save_stack(int skip) +{ + unsigned int size, nr_entries; + struct trace_stack *fstack; + + fstack = this_cpu_ptr(&trace_stack); + + size = ARRAY_SIZE(fstack->calls); + + nr_entries = stack_trace_save(fstack->calls, size, skip); + + fstack->stack_size = nr_entries * sizeof(unsigned long); + fstack->nr_entries = nr_entries; + + return; + +} +/* + * timerlat_dump_stack - dump a stack trace previously saved + * + * Dump a saved stack trace into the trace buffer. 
+ */ +static void timerlat_dump_stack(void) +{ + struct trace_event_call *call = &event_osnoise; + struct trace_array *tr = osnoise_trace; + struct trace_buffer *buffer = tr->array_buffer.buffer; + struct ring_buffer_event *event; + struct trace_stack *fstack; + struct stack_entry *entry; + unsigned int size; + + preempt_disable_notrace(); + fstack = this_cpu_ptr(&trace_stack); + size = fstack->stack_size; + + event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, + tracing_gen_ctx()); + if (!event) + goto out; + + entry = ring_buffer_event_data(event); + + memcpy(&entry->caller, fstack->calls, size); + entry->size = fstack->nr_entries; + + if (!call_filter_check_discard(call, entry, buffer, event)) + trace_buffer_unlock_commit_nostack(buffer, event); + +out: + preempt_enable_notrace(); +} +#else +#define timerlat_dump_stack() do {} while (0) +#define timerlat_save_stack(a) do {} while (0) +#endif /* CONFIG_STACKTRACE */ +#endif /* CONFIG_TIMERLAT_TRACER */ + +/* + * Macros to encapsulate the time capturing infrastructure. + */ +#define time_get() trace_clock_local() +#define time_to_us(x) div_u64(x, 1000) +#define time_sub(a, b) ((a) - (b)) + +/* + * cond_move_irq_delta_start - Forward the delta_start of a running IRQ + * + * If an IRQ is preempted by an NMI, its delta_start is pushed forward + * to discount the NMI interference. + * + * See get_int_safe_duration(). + */ +static inline void +cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration) +{ + if (osn_var->irq.delta_start) + osn_var->irq.delta_start += duration; +} + +#ifndef CONFIG_PREEMPT_RT +/* + * cond_move_softirq_delta_start - Forward the delta_start of a running softirq. + * + * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed + * forward to discount the interference. + * + * See get_int_safe_duration(). + */ +static inline void +cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration) +{ + if (osn_var->softirq.delta_start) + osn_var->softirq.delta_start += duration; +} +#else /* CONFIG_PREEMPT_RT */ +#define cond_move_softirq_delta_start(osn_var, duration) do {} while (0) +#endif + +/* + * cond_move_thread_delta_start - Forward the delta_start of a running thread + * + * If a noisy thread is preempted by an softirq, IRQ or NMI, its delta_start + * is pushed forward to discount the interference. + * + * See get_int_safe_duration(). + */ +static inline void +cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration) +{ + if (osn_var->thread.delta_start) + osn_var->thread.delta_start += duration; +} + +/* + * get_int_safe_duration - Get the duration of a window + * + * The irq, softirq and thread varaibles need to have its duration without + * the interference from higher priority interrupts. Instead of keeping a + * variable to discount the interrupt interference from these variables, the + * starting time of these variables are pushed forward with the interrupt's + * duration. In this way, a single variable is used to: + * + * - Know if a given window is being measured. + * - Account its duration. + * - Discount the interference. + * + * To avoid getting inconsistent values, e.g.,: + * + * now = time_get() + * ---> interrupt! + * delta_start -= int duration; + * <--- + * duration = now - delta_start; + * + * result: negative duration if the variable duration before the + * interrupt was smaller than the interrupt execution. + * + * A counter of interrupts is used. 
If the counter increased, try + * to capture an interference safe duration. + */ +static inline s64 +get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start) +{ + u64 int_counter, now; + s64 duration; + + do { + int_counter = local_read(&osn_var->int_counter); + /* synchronize with interrupts */ + barrier(); + + now = time_get(); + duration = (now - *delta_start); + + /* synchronize with interrupts */ + barrier(); + } while (int_counter != local_read(&osn_var->int_counter)); + + /* + * This is an evidence of race conditions that cause + * a value to be "discounted" too much. + */ + if (duration < 0) + osnoise_taint("Negative duration!\n"); + + *delta_start = 0; + + return duration; +} + +/* + * + * set_int_safe_time - Save the current time on *time, aware of interference + * + * Get the time, taking into consideration a possible interference from + * higher priority interrupts. + * + * See get_int_safe_duration() for an explanation. + */ +static u64 +set_int_safe_time(struct osnoise_variables *osn_var, u64 *time) +{ + u64 int_counter; + + do { + int_counter = local_read(&osn_var->int_counter); + /* synchronize with interrupts */ + barrier(); + + *time = time_get(); + + /* synchronize with interrupts */ + barrier(); + } while (int_counter != local_read(&osn_var->int_counter)); + + return int_counter; +} + +#ifdef CONFIG_TIMERLAT_TRACER +/* + * copy_int_safe_time - Copy *src into *desc aware of interference + */ +static u64 +copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src) +{ + u64 int_counter; + + do { + int_counter = local_read(&osn_var->int_counter); + /* synchronize with interrupts */ + barrier(); + + *dst = *src; + + /* synchronize with interrupts */ + barrier(); + } while (int_counter != local_read(&osn_var->int_counter)); + + return int_counter; +} +#endif /* CONFIG_TIMERLAT_TRACER */ + +/* + * trace_osnoise_callback - NMI entry/exit callback + * + * This function is called at the entry and exit NMI code. The bool enter + * distinguishes between either case. This function is used to note a NMI + * occurrence, compute the noise caused by the NMI, and to remove the noise + * it is potentially causing on other interference variables. + */ +void trace_osnoise_callback(bool enter) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + u64 duration; + + if (!osn_var->sampling) + return; + + /* + * Currently trace_clock_local() calls sched_clock() and the + * generic version is not NMI safe. + */ + if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) { + if (enter) { + osn_var->nmi.delta_start = time_get(); + local_inc(&osn_var->int_counter); + } else { + duration = time_get() - osn_var->nmi.delta_start; + + trace_nmi_noise(osn_var->nmi.delta_start, duration); + + cond_move_irq_delta_start(osn_var, duration); + cond_move_softirq_delta_start(osn_var, duration); + cond_move_thread_delta_start(osn_var, duration); + } + } + + if (enter) + osn_var->nmi.count++; +} + +/* + * osnoise_trace_irq_entry - Note the starting of an IRQ + * + * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs, + * it is safe to use a single variable (ons_var->irq) to save the statistics. + * The arrival_time is used to report... the arrival time. The delta_start + * is used to compute the duration at the IRQ exit handler. See + * cond_move_irq_delta_start(). 
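get_int_safe_duration() and set_int_safe_time() above retry until osn_var->int_counter is unchanged across the read, so a duration is never computed half before and half after an interrupt that also adjusts delta_start. Below is a user-space analogue of that retry pattern using a generation counter; the names are illustrative, and C11 atomics with a thread stand in for the kernel's per-CPU data and barrier().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint gen;			/* bumped by each "interrupt" */
static unsigned long long delta_start;	/* adjusted by the "interrupt" */

/* Mirrors get_int_safe_duration(): retry while an interrupt slipped in. */
static long long read_duration(unsigned long long now)
{
	unsigned int g;
	long long duration;

	do {
		g = atomic_load(&gen);
		duration = (long long)(now - delta_start);
	} while (g != atomic_load(&gen));

	return duration;
}

static void *fake_interrupt(void *arg)
{
	(void)arg;
	/* An interrupt accounts its own time by pushing delta_start forward. */
	delta_start += 100;
	atomic_fetch_add(&gen, 1);
	return NULL;
}

int main(void)
{
	pthread_t t;

	delta_start = 1000;
	pthread_create(&t, NULL, fake_interrupt, NULL);
	pthread_join(t, NULL);

	/* now = 1500: the reported duration excludes the 100 "interrupt" units */
	printf("duration = %lld\n", read_duration(1500));	/* prints 400 */
	return 0;
}
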
+ */ +void osnoise_trace_irq_entry(int id) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + + if (!osn_var->sampling) + return; + /* + * This value will be used in the report, but not to compute + * the execution time, so it is safe to get it unsafe. + */ + osn_var->irq.arrival_time = time_get(); + set_int_safe_time(osn_var, &osn_var->irq.delta_start); + osn_var->irq.count++; + + local_inc(&osn_var->int_counter); +} + +/* + * osnoise_irq_exit - Note the end of an IRQ, sava data and trace + * + * Computes the duration of the IRQ noise, and trace it. Also discounts the + * interference from other sources of noise could be currently being accounted. + */ +void osnoise_trace_irq_exit(int id, const char *desc) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + int duration; + + if (!osn_var->sampling) + return; + + duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start); + trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration); + osn_var->irq.arrival_time = 0; + cond_move_softirq_delta_start(osn_var, duration); + cond_move_thread_delta_start(osn_var, duration); +} + +/* + * trace_irqentry_callback - Callback to the irq:irq_entry traceevent + * + * Used to note the starting of an IRQ occurece. + */ +static void trace_irqentry_callback(void *data, int irq, + struct irqaction *action) +{ + osnoise_trace_irq_entry(irq); +} + +/* + * trace_irqexit_callback - Callback to the irq:irq_exit traceevent + * + * Used to note the end of an IRQ occurece. + */ +static void trace_irqexit_callback(void *data, int irq, + struct irqaction *action, int ret) +{ + osnoise_trace_irq_exit(irq, action->name); +} + +/* + * arch specific register function. + */ +int __weak osnoise_arch_register(void) +{ + return 0; +} + +/* + * arch specific unregister function. + */ +void __weak osnoise_arch_unregister(void) +{ + return; +} + +/* + * hook_irq_events - Hook IRQ handling events + * + * This function hooks the IRQ related callbacks to the respective trace + * events. + */ +static int hook_irq_events(void) +{ + int ret; + + ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL); + if (ret) + goto out_err; + + ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL); + if (ret) + goto out_unregister_entry; + + ret = osnoise_arch_register(); + if (ret) + goto out_irq_exit; + + return 0; + +out_irq_exit: + unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL); +out_unregister_entry: + unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL); +out_err: + return -EINVAL; +} + +/* + * unhook_irq_events - Unhook IRQ handling events + * + * This function unhooks the IRQ related callbacks to the respective trace + * events. + */ +static void unhook_irq_events(void) +{ + osnoise_arch_unregister(); + unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL); + unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL); +} + +#ifndef CONFIG_PREEMPT_RT +/* + * trace_softirq_entry_callback - Note the starting of a softirq + * + * Save the starting time of a softirq. As softirqs are non-preemptive to + * other softirqs, it is safe to use a single variable (ons_var->softirq) + * to save the statistics. The arrival_time is used to report... the + * arrival time. The delta_start is used to compute the duration at the + * softirq exit handler. See cond_move_softirq_delta_start(). 
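The cond_move_*_delta_start() helpers push the start of a lower-priority window forward by the duration of the higher-priority noise, so one delta_start both measures the window and discounts interference. A short worked example with concrete numbers (arbitrary time units, not taken from any real trace):

#include <stdio.h>

int main(void)
{
	/* A noisy thread runs from t=100 to t=500; an IRQ consumes t=200..260. */
	unsigned long long thread_delta_start = 100;	/* thread_entry() at t=100 */
	unsigned long long irq_start = 200, irq_end = 260;
	unsigned long long irq_duration = irq_end - irq_start;

	/* IRQ exit: account the IRQ noise, then cond_move_thread_delta_start() */
	printf("irq noise    = %llu\n", irq_duration);		/* 60  */
	thread_delta_start += irq_duration;			/* 100 -> 160 */

	/* thread_exit() at t=500: the thread is charged only its own share */
	printf("thread noise = %llu\n", 500 - thread_delta_start);	/* 340 */
	printf("total noise  = %llu\n", irq_duration + (500 - thread_delta_start)); /* 400 */
	return 0;
}
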
+ */ +static void trace_softirq_entry_callback(void *data, unsigned int vec_nr) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + + if (!osn_var->sampling) + return; + /* + * This value will be used in the report, but not to compute + * the execution time, so it is safe to get it unsafe. + */ + osn_var->softirq.arrival_time = time_get(); + set_int_safe_time(osn_var, &osn_var->softirq.delta_start); + osn_var->softirq.count++; + + local_inc(&osn_var->int_counter); +} + +/* + * trace_softirq_exit_callback - Note the end of an softirq + * + * Computes the duration of the softirq noise, and trace it. Also discounts the + * interference from other sources of noise could be currently being accounted. + */ +static void trace_softirq_exit_callback(void *data, unsigned int vec_nr) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + int duration; + + if (!osn_var->sampling) + return; + +#ifdef CONFIG_TIMERLAT_TRACER + /* + * If the timerlat is enabled, but the irq handler did + * not run yet enabling timerlat_tracer, do not trace. + */ + if (unlikely(osnoise_data.timerlat_tracer)) { + struct timerlat_variables *tlat_var; + tlat_var = this_cpu_tmr_var(); + if (!tlat_var->tracing_thread) { + osn_var->softirq.arrival_time = 0; + osn_var->softirq.delta_start = 0; + return; + } + } +#endif + + duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start); + trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration); + cond_move_thread_delta_start(osn_var, duration); + osn_var->softirq.arrival_time = 0; +} + +/* + * hook_softirq_events - Hook softirq handling events + * + * This function hooks the softirq related callbacks to the respective trace + * events. + */ +static int hook_softirq_events(void) +{ + int ret; + + ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL); + if (ret) + goto out_err; + + ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL); + if (ret) + goto out_unreg_entry; + + return 0; + +out_unreg_entry: + unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL); +out_err: + return -EINVAL; +} + +/* + * unhook_softirq_events - Unhook softirq handling events + * + * This function hooks the softirq related callbacks to the respective trace + * events. + */ +static void unhook_softirq_events(void) +{ + unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL); + unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL); +} +#else /* CONFIG_PREEMPT_RT */ +/* + * softirq are threads on the PREEMPT_RT mode. + */ +static int hook_softirq_events(void) +{ + return 0; +} +static void unhook_softirq_events(void) +{ +} +#endif + +/* + * thread_entry - Record the starting of a thread noise window + * + * It saves the context switch time for a noisy thread, and increments + * the interference counters. + */ +static void +thread_entry(struct osnoise_variables *osn_var, struct task_struct *t) +{ + if (!osn_var->sampling) + return; + /* + * The arrival time will be used in the report, but not to compute + * the execution time, so it is safe to get it unsafe. + */ + osn_var->thread.arrival_time = time_get(); + + set_int_safe_time(osn_var, &osn_var->thread.delta_start); + + osn_var->thread.count++; + local_inc(&osn_var->int_counter); +} + +/* + * thread_exit - Report the end of a thread noise window + * + * It computes the total noise from a thread, tracing if needed. 
+ */ +static void +thread_exit(struct osnoise_variables *osn_var, struct task_struct *t) +{ + int duration; + + if (!osn_var->sampling) + return; + +#ifdef CONFIG_TIMERLAT_TRACER + if (osnoise_data.timerlat_tracer) { + struct timerlat_variables *tlat_var; + tlat_var = this_cpu_tmr_var(); + if (!tlat_var->tracing_thread) { + osn_var->thread.delta_start = 0; + osn_var->thread.arrival_time = 0; + return; + } + } +#endif + + duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start); + + trace_thread_noise(t, osn_var->thread.arrival_time, duration); + + osn_var->thread.arrival_time = 0; +} + +/* + * trace_sched_switch - sched:sched_switch trace event handler + * + * This function is hooked to the sched:sched_switch trace event, and it is + * used to record the beginning and to report the end of a thread noise window. + */ +static void +trace_sched_switch_callback(void *data, bool preempt, struct task_struct *p, + struct task_struct *n) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + + if (p->pid != osn_var->pid) + thread_exit(osn_var, p); + + if (n->pid != osn_var->pid) + thread_entry(osn_var, n); +} + +/* + * hook_thread_events - Hook the insturmentation for thread noise + * + * Hook the osnoise tracer callbacks to handle the noise from other + * threads on the necessary kernel events. + */ +static int hook_thread_events(void) +{ + int ret; + + ret = register_trace_sched_switch(trace_sched_switch_callback, NULL); + if (ret) + return -EINVAL; + + return 0; +} + +/* + * unhook_thread_events - *nhook the insturmentation for thread noise + * + * Unook the osnoise tracer callbacks to handle the noise from other + * threads on the necessary kernel events. + */ +static void unhook_thread_events(void) +{ + unregister_trace_sched_switch(trace_sched_switch_callback, NULL); +} + +/* + * save_osn_sample_stats - Save the osnoise_sample statistics + * + * Save the osnoise_sample statistics before the sampling phase. These + * values will be used later to compute the diff betwneen the statistics + * before and after the osnoise sampling. + */ +static void +save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s) +{ + s->nmi_count = osn_var->nmi.count; + s->irq_count = osn_var->irq.count; + s->softirq_count = osn_var->softirq.count; + s->thread_count = osn_var->thread.count; +} + +/* + * diff_osn_sample_stats - Compute the osnoise_sample statistics + * + * After a sample period, compute the difference on the osnoise_sample + * statistics. The struct osnoise_sample *s contains the statistics saved via + * save_osn_sample_stats() before the osnoise sampling. + */ +static void +diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s) +{ + s->nmi_count = osn_var->nmi.count - s->nmi_count; + s->irq_count = osn_var->irq.count - s->irq_count; + s->softirq_count = osn_var->softirq.count - s->softirq_count; + s->thread_count = osn_var->thread.count - s->thread_count; +} + +/* + * osnoise_stop_tracing - Stop tracing and the tracer. + */ +static void osnoise_stop_tracing(void) +{ + struct trace_array *tr = osnoise_trace; + tracer_tracing_off(tr); +} + +/* + * run_osnoise - Sample the time and look for osnoise + * + * Used to capture the time, looking for potential osnoise latency repeatedly. + * Different from hwlat_detector, it is called with preemption and interrupts + * enabled. This allows irqs, softirqs and threads to run, interfering on the + * osnoise sampling thread, as they would do with a regular thread. 
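run_osnoise(), whose body follows, repeatedly takes two successive timestamps, treats any gap at or above the threshold as noise, and keeps the sum, the maximum and a hardware-noise heuristic (a gap with no interrupt counted during it). A stripped-down user-space analogue of that loop is sketched below; it has no interference counters and no tracing integration, so every large gap is simply reported as noise.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	const uint64_t runtime_ns = 1000000000ULL;	/* 1s, like sample_runtime */
	const uint64_t threshold_ns = 5000;		/* 5us default threshold */
	uint64_t start = now_ns(), last = start, sample;
	uint64_t sum_noise = 0, max_noise = 0;

	do {
		sample = now_ns();
		uint64_t noise = sample - last;

		if (noise >= threshold_ns) {
			sum_noise += noise;
			if (noise > max_noise)
				max_noise = noise;
		}
		last = sample;
	} while (sample - start < runtime_ns);

	printf("runtime %llu us, noise %llu us, max single noise %llu us\n",
	       (unsigned long long)((sample - start) / 1000),
	       (unsigned long long)(sum_noise / 1000),
	       (unsigned long long)(max_noise / 1000));
	return 0;
}
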
+ */ +static int run_osnoise(void) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + struct trace_array *tr = osnoise_trace; + u64 start, sample, last_sample; + u64 last_int_count, int_count; + s64 noise = 0, max_noise = 0; + s64 total, last_total = 0; + struct osnoise_sample s; + unsigned int threshold; + u64 runtime, stop_in; + u64 sum_noise = 0; + int hw_count = 0; + int ret = -1; + + /* + * Considers the current thread as the workload. + */ + osn_var->pid = current->pid; + + /* + * Save the current stats for the diff + */ + save_osn_sample_stats(osn_var, &s); + + /* + * if threshold is 0, use the default value of 5 us. + */ + threshold = tracing_thresh ? : 5000; + + /* + * Make sure NMIs see sampling first + */ + osn_var->sampling = true; + barrier(); + + /* + * Transform the *_us config to nanoseconds to avoid the + * division on the main loop. + */ + runtime = osnoise_data.sample_runtime * NSEC_PER_USEC; + stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC; + + /* + * Start timestemp + */ + start = time_get(); + + /* + * "previous" loop. + */ + last_int_count = set_int_safe_time(osn_var, &last_sample); + + do { + /* + * Get sample! + */ + int_count = set_int_safe_time(osn_var, &sample); + + noise = time_sub(sample, last_sample); + + /* + * This shouldn't happen. + */ + if (noise < 0) { + osnoise_taint("negative noise!"); + goto out; + } + + /* + * Sample runtime. + */ + total = time_sub(sample, start); + + /* + * Check for possible overflows. + */ + if (total < last_total) { + osnoise_taint("total overflow!"); + break; + } + + last_total = total; + + if (noise >= threshold) { + int interference = int_count - last_int_count; + + if (noise > max_noise) + max_noise = noise; + + if (!interference) + hw_count++; + + sum_noise += noise; + + trace_sample_threshold(last_sample, noise, interference); + + if (osnoise_data.stop_tracing) + if (noise > stop_in) + osnoise_stop_tracing(); + } + + /* + * For the non-preemptive kernel config: let threads runs, if + * they so wish. + */ + cond_resched(); + + last_sample = sample; + last_int_count = int_count; + + } while (total < runtime && !kthread_should_stop()); + + /* + * Finish the above in the view for interrupts. + */ + barrier(); + + osn_var->sampling = false; + + /* + * Make sure sampling data is no longer updated. + */ + barrier(); + + /* + * Save noise info. + */ + s.noise = time_to_us(sum_noise); + s.runtime = time_to_us(total); + s.max_sample = time_to_us(max_noise); + s.hw_count = hw_count; + + /* Save interference stats info */ + diff_osn_sample_stats(osn_var, &s); + + trace_osnoise_sample(&s); + + /* Keep a running maximum ever recorded osnoise "latency" */ + if (max_noise > tr->max_latency) { + tr->max_latency = max_noise; + latency_fsnotify(tr); + } + + if (osnoise_data.stop_tracing_total) + if (s.noise > osnoise_data.stop_tracing_total) + osnoise_stop_tracing(); + + return 0; +out: + return ret; +} + +static struct cpumask osnoise_cpumask; +static struct cpumask save_cpumask; + +/* + * osnoise_main - The osnoise detection kernel thread + * + * Calls run_osnoise() function to measure the osnoise for the configured runtime, + * every period. 
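Between windows, osnoise_main() (below) pauses for the remainder of the period: interval = period - runtime in microseconds, converted to milliseconds for msleep_interruptible(), or it only yields when that rounds down to zero, which is the case with the 1s/1s defaults. A small worked sketch of that computation:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_MSEC 1000ULL

static void show_pause(uint64_t period_us, uint64_t runtime_us)
{
	uint64_t interval_ms = (period_us - runtime_us) / USEC_PER_MSEC;

	if (interval_ms < 1)
		printf("period %llu us, runtime %llu us -> no sleep, just yield\n",
		       (unsigned long long)period_us, (unsigned long long)runtime_us);
	else
		printf("period %llu us, runtime %llu us -> sleep %llu ms\n",
		       (unsigned long long)period_us, (unsigned long long)runtime_us,
		       (unsigned long long)interval_ms);
}

int main(void)
{
	show_pause(1000000, 1000000);	/* defaults: sample the whole period */
	show_pause(1000000, 500000);	/* run 0.5s, then pause 500 ms */
	return 0;
}
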
+ */ +static int osnoise_main(void *data) +{ + u64 interval; + + while (!kthread_should_stop()) { + + run_osnoise(); + + mutex_lock(&interface_lock); + interval = osnoise_data.sample_period - osnoise_data.sample_runtime; + mutex_unlock(&interface_lock); + + do_div(interval, USEC_PER_MSEC); + + /* + * differently from hwlat_detector, the osnoise tracer can run + * without a pause because preemption is on. + */ + if (interval < 1) { + /* Let synchronize_rcu_tasks() make progress */ + cond_resched_tasks_rcu_qs(); + continue; + } + + if (msleep_interruptible(interval)) + break; + } + + return 0; +} + +#ifdef CONFIG_TIMERLAT_TRACER +/* + * timerlat_irq - hrtimer handler for timerlat. + */ +static enum hrtimer_restart timerlat_irq(struct hrtimer *timer) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + struct trace_array *tr = osnoise_trace; + struct timerlat_variables *tlat; + struct timerlat_sample s; + u64 now; + u64 diff; + + /* + * I am not sure if the timer was armed for this CPU. So, get + * the timerlat struct from the timer itself, not from this + * CPU. + */ + tlat = container_of(timer, struct timerlat_variables, timer); + + now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer)); + + /* + * Enable the osnoise: events for thread an softirq. + */ + tlat->tracing_thread = true; + + osn_var->thread.arrival_time = time_get(); + + /* + * A hardirq is running: the timer IRQ. It is for sure preempting + * a thread, and potentially preempting a softirq. + * + * At this point, it is not interesting to know the duration of the + * preempted thread (and maybe softirq), but how much time they will + * delay the beginning of the execution of the timer thread. + * + * To get the correct (net) delay added by the softirq, its delta_start + * is set as the IRQ one. In this way, at the return of the IRQ, the delta + * start of the sofitrq will be zeroed, accounting then only the time + * after that. + * + * The thread follows the same principle. However, if a softirq is + * running, the thread needs to receive the softirq delta_start. The + * reason being is that the softirq will be the last to be unfolded, + * resseting the thread delay to zero. + */ +#ifndef CONFIG_PREEMPT_RT + if (osn_var->softirq.delta_start) { + copy_int_safe_time(osn_var, &osn_var->thread.delta_start, + &osn_var->softirq.delta_start); + + copy_int_safe_time(osn_var, &osn_var->softirq.delta_start, + &osn_var->irq.delta_start); + } else { + copy_int_safe_time(osn_var, &osn_var->thread.delta_start, + &osn_var->irq.delta_start); + } +#else /* CONFIG_PREEMPT_RT */ + /* + * The sofirqs run as threads on RT, so there is not need + * to keep track of it. + */ + copy_int_safe_time(osn_var, &osn_var->thread.delta_start, &osn_var->irq.delta_start); +#endif /* CONFIG_PREEMPT_RT */ + + /* + * Compute the current time with the expected time. 
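The value computed just below, now - tlat->abs_period, is the timer latency reported from IRQ context; timerlat_main(), further below, reports the same quantity again from thread context after the wakeup, with the thread made SCHED_FIFO "as cyclictest is usually used". A hedged user-space analogue of that thread-side measurement, using clock_nanosleep(TIMER_ABSTIME); it sees only the thread-context latency, not the IRQ-context one.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t ts_ns(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

int main(void)
{
	const uint64_t period_ns = 1000 * 1000;	/* 1ms, like DEFAULT_TIMERLAT_PERIOD */
	struct timespec next, now;

	clock_gettime(CLOCK_MONOTONIC, &next);

	for (unsigned int seq = 1; seq <= 5; seq++) {
		/* next absolute activation, like wait_next_period() */
		uint64_t abs = ts_ns(&next) + period_ns;

		next.tv_sec = abs / NSEC_PER_SEC;
		next.tv_nsec = abs % NSEC_PER_SEC;

		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);

		/* timer_latency = now - abs_period, as in the tracer */
		printf("seq:%u timer_latency:%llu ns\n", seq,
		       (unsigned long long)(ts_ns(&now) - abs));
	}
	return 0;
}
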
+ */ + diff = now - tlat->abs_period; + + tlat->count++; + s.seqnum = tlat->count; + s.timer_latency = diff; + s.context = IRQ_CONTEXT; + + trace_timerlat_sample(&s); + + /* Keep a running maximum ever recorded os noise "latency" */ + if (diff > tr->max_latency) { + tr->max_latency = diff; + latency_fsnotify(tr); + } + + if (osnoise_data.stop_tracing) + if (time_to_us(diff) >= osnoise_data.stop_tracing) + osnoise_stop_tracing(); + + wake_up_process(tlat->kthread); + + if (osnoise_data.print_stack) + timerlat_save_stack(0); + + return HRTIMER_NORESTART; +} + +/* + * wait_next_period - Wait for the next period for timerlat + */ +static int wait_next_period(struct timerlat_variables *tlat) +{ + ktime_t next_abs_period, now; + u64 rel_period = osnoise_data.timerlat_period * 1000; + + now = hrtimer_cb_get_time(&tlat->timer); + next_abs_period = ns_to_ktime(tlat->abs_period + rel_period); + + /* + * Save the next abs_period. + */ + tlat->abs_period = (u64) ktime_to_ns(next_abs_period); + + /* + * If the new abs_period is in the past, skip the activation. + */ + while (ktime_compare(now, next_abs_period) > 0) { + next_abs_period = ns_to_ktime(tlat->abs_period + rel_period); + tlat->abs_period = (u64) ktime_to_ns(next_abs_period); + } + + set_current_state(TASK_INTERRUPTIBLE); + + hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD); + schedule(); + return 1; +} + +/* + * timerlat_main- Timerlat main + */ +static int timerlat_main(void *data) +{ + struct osnoise_variables *osn_var = this_cpu_osn_var(); + struct timerlat_variables *tlat = this_cpu_tmr_var(); + struct timerlat_sample s; + struct sched_param sp; + u64 now, diff; + + /* + * Make the thread RT, that is how cyclictest is usually used. + */ + sp.sched_priority = DEFAULT_TIMERLAT_PRIO; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); + + tlat->count = 0; + tlat->tracing_thread = false; + + hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); + tlat->timer.function = timerlat_irq; + tlat->kthread = current; + osn_var->pid = current->pid; + /* + * Anotate the arrival time. + */ + tlat->abs_period = hrtimer_cb_get_time(&tlat->timer); + + wait_next_period(tlat); + + osn_var->sampling = 1; + + while (!kthread_should_stop()) { + now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer)); + diff = now - tlat->abs_period; + + s.seqnum = tlat->count; + s.timer_latency = diff; + s.context = THREAD_CONTEXT; + + trace_timerlat_sample(&s); + +#ifdef CONFIG_STACKTRACE + if (osnoise_data.print_stack) + if (osnoise_data.print_stack <= time_to_us(diff)) + timerlat_dump_stack(); +#endif /* CONFIG_STACKTRACE */ + + tlat->tracing_thread = false; + if (osnoise_data.stop_tracing_total) + if (time_to_us(diff) >= osnoise_data.stop_tracing_total) + osnoise_stop_tracing(); + + wait_next_period(tlat); + } + + hrtimer_cancel(&tlat->timer); + return 0; +} +#endif /* CONFIG_TIMERLAT_TRACER */ + +/* + * stop_kthread - stop a workload thread + */ +static void stop_kthread(unsigned int cpu) +{ + struct task_struct *kthread; + + kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; + if (kthread) + kthread_stop(kthread); + per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; +} + +/* + * stop_per_cpu_kthread - Stop per-cpu threads + * + * Stop the osnoise sampling htread. Use this on unload and at system + * shutdown. 
+ */ +static void stop_per_cpu_kthreads(void) +{ + int cpu; + + get_online_cpus(); + + for_each_online_cpu(cpu) + stop_kthread(cpu); + + put_online_cpus(); +} + +/* + * start_kthread - Start a workload tread + */ +static int start_kthread(unsigned int cpu) +{ + struct task_struct *kthread; + void *main = osnoise_main; + char comm[24]; + +#ifdef CONFIG_TIMERLAT_TRACER + if (osnoise_data.timerlat_tracer) { + snprintf(comm, 24, "timerlat/%d", cpu); + main = timerlat_main; + } else { + snprintf(comm, 24, "osnoise/%d", cpu); + } +#else + snprintf(comm, 24, "osnoise/%d", cpu); +#endif + kthread = kthread_create_on_cpu(main, NULL, cpu, comm); + + if (IS_ERR(kthread)) { + pr_err(BANNER "could not start sampling thread\n"); + stop_per_cpu_kthreads(); + return -ENOMEM; + } + + per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread; + wake_up_process(kthread); + + return 0; +} + +/* + * start_per_cpu_kthread - Kick off per-cpu osnoise sampling kthreads + * + * This starts the kernel thread that will look for osnoise on many + * cpus. + */ +static int start_per_cpu_kthreads(struct trace_array *tr) +{ + struct cpumask *current_mask = &save_cpumask; + int retval; + int cpu; + + get_online_cpus(); + /* + * Run only on CPUs in which trace and osnoise are allowed to run. + */ + cpumask_and(current_mask, tr->tracing_cpumask, &osnoise_cpumask); + /* + * And the CPU is online. + */ + cpumask_and(current_mask, cpu_online_mask, current_mask); + + for_each_possible_cpu(cpu) + per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; + + for_each_cpu(cpu, current_mask) { + retval = start_kthread(cpu); + if (retval) { + stop_per_cpu_kthreads(); + return retval; + } + } + + put_online_cpus(); + + return 0; +} + +#ifdef CONFIG_HOTPLUG_CPU +static void osnoise_hotplug_workfn(struct work_struct *dummy) +{ + struct trace_array *tr = osnoise_trace; + unsigned int cpu = smp_processor_id(); + + + mutex_lock(&trace_types_lock); + + if (!osnoise_busy) + goto out_unlock_trace; + + mutex_lock(&interface_lock); + get_online_cpus(); + + if (!cpumask_test_cpu(cpu, &osnoise_cpumask)) + goto out_unlock; + + if (!cpumask_test_cpu(cpu, tr->tracing_cpumask)) + goto out_unlock; + + start_kthread(cpu); + +out_unlock: + put_online_cpus(); + mutex_unlock(&interface_lock); +out_unlock_trace: + mutex_unlock(&trace_types_lock); +} + +static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn); + +/* + * osnoise_cpu_init - CPU hotplug online callback function + */ +static int osnoise_cpu_init(unsigned int cpu) +{ + schedule_work_on(cpu, &osnoise_hotplug_work); + return 0; +} + +/* + * osnoise_cpu_die - CPU hotplug offline callback function + */ +static int osnoise_cpu_die(unsigned int cpu) +{ + stop_kthread(cpu); + return 0; +} + +static void osnoise_init_hotplug_support(void) +{ + int ret; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online", + osnoise_cpu_init, osnoise_cpu_die); + if (ret < 0) + pr_warn(BANNER "Error to init cpu hotplug support\n"); + + return; +} +#else /* CONFIG_HOTPLUG_CPU */ +static void osnoise_init_hotplug_support(void) +{ + return; +} +#endif /* CONFIG_HOTPLUG_CPU */ + +/* + * osnoise_cpus_read - Read function for reading the "cpus" file + * @filp: The active open file structure + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * Prints the "cpus" output into the user-provided buffer. 
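start_per_cpu_kthreads() above only spawns a sampling thread on CPUs that are in the instance's tracing_cpumask, in osnoise_cpumask, and online, all at once. A small stand-alone sketch of that intersection, modelling each cpumask as a plain bitmap (not part of the patch; the masks are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative 8-CPU masks: bit N set means CPU N is allowed/online. */
	uint8_t tracing_cpumask = 0x0f;	/* tracing restricted to CPUs 0-3 */
	uint8_t osnoise_cpumask = 0xff;	/* osnoise allowed everywhere     */
	uint8_t cpu_online_mask = 0xfe;	/* CPU 0 happens to be offline    */
	uint8_t current_mask;
	int cpu;

	/* The same two cpumask_and() steps performed by start_per_cpu_kthreads(). */
	current_mask = tracing_cpumask & osnoise_cpumask;
	current_mask &= cpu_online_mask;

	for (cpu = 0; cpu < 8; cpu++)
		if (current_mask & (1u << cpu))
			printf("start osnoise/%d\n", cpu);	/* CPUs 1-3 here */
	return 0;
}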
+ */ +static ssize_t +osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count, + loff_t *ppos) +{ + char *mask_str; + int len; + + mutex_lock(&interface_lock); + + len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1; + mask_str = kmalloc(len, GFP_KERNEL); + if (!mask_str) { + count = -ENOMEM; + goto out_unlock; + } + + len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)); + if (len >= count) { + count = -EINVAL; + goto out_free; + } + + count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); + +out_free: + kfree(mask_str); +out_unlock: + mutex_unlock(&interface_lock); + + return count; +} + +static void osnoise_tracer_start(struct trace_array *tr); +static void osnoise_tracer_stop(struct trace_array *tr); + +/* + * osnoise_cpus_write - Write function for "cpus" entry + * @filp: The active open file structure + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in @file + * + * This function provides a write implementation for the "cpus" + * interface to the osnoise trace. By default, it lists all CPUs, + * in this way, allowing osnoise threads to run on any online CPU + * of the system. It serves to restrict the execution of osnoise to the + * set of CPUs writing via this interface. Note that osnoise also + * respects the "tracing_cpumask." Hence, osnoise threads will run only + * on the set of CPUs allowed here AND on "tracing_cpumask." Why not + * have just "tracing_cpumask?" Because the user might be interested + * in tracing what is running on other CPUs. For instance, one might + * run osnoise in one HT CPU while observing what is running on the + * sibling HT CPU. + */ +static ssize_t +osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count, + loff_t *ppos) +{ + struct trace_array *tr = osnoise_trace; + cpumask_var_t osnoise_cpumask_new; + int running, err; + char buf[256]; + + if (count >= 256) + return -EINVAL; + + if (copy_from_user(buf, ubuf, count)) + return -EFAULT; + + if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL)) + return -ENOMEM; + + err = cpulist_parse(buf, osnoise_cpumask_new); + if (err) + goto err_free; + + /* + * trace_types_lock is taken to avoid concurrency on start/stop + * and osnoise_busy. + */ + mutex_lock(&trace_types_lock); + running = osnoise_busy; + if (running) + osnoise_tracer_stop(tr); + + mutex_lock(&interface_lock); + /* + * osnoise_cpumask is read by CPU hotplug operations. + */ + get_online_cpus(); + + cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new); + + put_online_cpus(); + mutex_unlock(&interface_lock); + + if (running) + osnoise_tracer_start(tr); + mutex_unlock(&trace_types_lock); + + free_cpumask_var(osnoise_cpumask_new); + return count; + +err_free: + free_cpumask_var(osnoise_cpumask_new); + + return err; +} + +/* + * osnoise/runtime_us: cannot be greater than the period. + */ +static struct trace_min_max_param osnoise_runtime = { + .lock = &interface_lock, + .val = &osnoise_data.sample_runtime, + .max = &osnoise_data.sample_period, + .min = NULL, +}; + +/* + * osnoise/period_us: cannot be smaller than the runtime. + */ +static struct trace_min_max_param osnoise_period = { + .lock = &interface_lock, + .val = &osnoise_data.sample_period, + .max = NULL, + .min = &osnoise_data.sample_runtime, +}; + +/* + * osnoise/stop_tracing_us: no limit. 
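As the osnoise_cpus_write() comment above explains, the "cpus" file takes a cpulist (for example "0-3,8") that is further intersected with tracing_cpumask, so the workload threads can be pinned away from the CPUs one wants to observe. A hedged user-space sketch of driving that file; it assumes tracefs is mounted at /sys/kernel/tracing, which may differ on a given system:

#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	/* Assumed mount point; adjust if tracefs lives elsewhere. */
	const char *path = "/sys/kernel/tracing/osnoise/cpus";
	const char *cpulist = "0-3";	/* keep the osnoise workload on CPUs 0-3 */
	FILE *f = fopen(path, "w");

	if (!f) {
		fprintf(stderr, "cannot open %s: %s\n", path, strerror(errno));
		return 1;
	}
	if (fwrite(cpulist, 1, strlen(cpulist), f) != strlen(cpulist) || fclose(f) != 0) {
		fprintf(stderr, "write to %s failed: %s\n", path, strerror(errno));
		return 1;
	}
	return 0;
}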
+ */ +static struct trace_min_max_param osnoise_stop_tracing_in = { + .lock = &interface_lock, + .val = &osnoise_data.stop_tracing, + .max = NULL, + .min = NULL, +}; + +/* + * osnoise/stop_tracing_total_us: no limit. + */ +static struct trace_min_max_param osnoise_stop_tracing_total = { + .lock = &interface_lock, + .val = &osnoise_data.stop_tracing_total, + .max = NULL, + .min = NULL, +}; + +#ifdef CONFIG_TIMERLAT_TRACER +/* + * osnoise/print_stack: print the stacktrace of the IRQ handler if the total + * latency is higher than val. + */ +static struct trace_min_max_param osnoise_print_stack = { + .lock = &interface_lock, + .val = &osnoise_data.print_stack, + .max = NULL, + .min = NULL, +}; + +/* + * osnoise/timerlat_period: min 100 us, max 1 s + */ +u64 timerlat_min_period = 100; +u64 timerlat_max_period = 1000000; +static struct trace_min_max_param timerlat_period = { + .lock = &interface_lock, + .val = &osnoise_data.timerlat_period, + .max = &timerlat_max_period, + .min = &timerlat_min_period, +}; +#endif + +static const struct file_operations cpus_fops = { + .open = tracing_open_generic, + .read = osnoise_cpus_read, + .write = osnoise_cpus_write, + .llseek = generic_file_llseek, +}; + +/* + * init_tracefs - A function to initialize the tracefs interface files + * + * This function creates entries in tracefs for "osnoise" and "timerlat". + * It creates these directories in the tracing directory, and within that + * directory the use can change and view the configs. + */ +static int init_tracefs(void) +{ + struct dentry *top_dir; + struct dentry *tmp; + int ret; + + ret = tracing_init_dentry(); + if (ret) + return -ENOMEM; + + top_dir = tracefs_create_dir("osnoise", NULL); + if (!top_dir) + return 0; + + tmp = tracefs_create_file("period_us", 0640, top_dir, + &osnoise_period, &trace_min_max_fops); + if (!tmp) + goto err; + + tmp = tracefs_create_file("runtime_us", 0644, top_dir, + &osnoise_runtime, &trace_min_max_fops); + if (!tmp) + goto err; + + tmp = tracefs_create_file("stop_tracing_us", 0640, top_dir, + &osnoise_stop_tracing_in, &trace_min_max_fops); + if (!tmp) + goto err; + + tmp = tracefs_create_file("stop_tracing_total_us", 0640, top_dir, + &osnoise_stop_tracing_total, &trace_min_max_fops); + if (!tmp) + goto err; + + tmp = trace_create_file("cpus", 0644, top_dir, NULL, &cpus_fops); + if (!tmp) + goto err; +#ifdef CONFIG_TIMERLAT_TRACER +#ifdef CONFIG_STACKTRACE + tmp = tracefs_create_file("print_stack", 0640, top_dir, + &osnoise_print_stack, &trace_min_max_fops); + if (!tmp) + goto err; +#endif + + tmp = tracefs_create_file("timerlat_period_us", 0640, top_dir, + &timerlat_period, &trace_min_max_fops); + if (!tmp) + goto err; +#endif + + return 0; + +err: + tracefs_remove(top_dir); + return -ENOMEM; +} + +static int osnoise_hook_events(void) +{ + int retval; + + /* + * Trace is already hooked, we are re-enabling from + * a stop_tracing_*. + */ + if (trace_osnoise_callback_enabled) + return 0; + + retval = hook_irq_events(); + if (retval) + return -EINVAL; + + retval = hook_softirq_events(); + if (retval) + goto out_unhook_irq; + + retval = hook_thread_events(); + /* + * All fine! + */ + if (!retval) + return 0; + + unhook_softirq_events(); +out_unhook_irq: + unhook_irq_events(); + return -EINVAL; +} + +static int __osnoise_tracer_start(struct trace_array *tr) +{ + int retval; + + osn_var_reset_all(); + + retval = osnoise_hook_events(); + if (retval) + return retval; + /* + * Make sure NMIs see reseted values. 
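osnoise_hook_events() above registers the irq, softirq and thread hooks in order and unwinds the ones that did succeed when a later registration fails. A generic, self-contained sketch of that register-or-roll-back pattern (not part of the patch; the hook functions are stand-ins that only print):

#include <stdio.h>

/* Stand-in hooks: each returns 0 on success, non-zero on failure. */
static int hook_irq_events(void)        { puts("hook irq");      return 0; }
static int hook_softirq_events(void)    { puts("hook softirq");  return 0; }
static int hook_thread_events(void)     { puts("hook thread");   return 1; /* simulated failure */ }
static void unhook_irq_events(void)     { puts("unhook irq"); }
static void unhook_softirq_events(void) { puts("unhook softirq"); }

/* Same shape as osnoise_hook_events(): undo earlier steps on failure. */
static int hook_events(void)
{
	if (hook_irq_events())
		return -1;
	if (hook_softirq_events())
		goto out_unhook_irq;
	if (!hook_thread_events())
		return 0;		/* all fine */

	unhook_softirq_events();
out_unhook_irq:
	unhook_irq_events();
	return -1;
}

int main(void)
{
	printf("hook_events() = %d\n", hook_events());
	return 0;
}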
+ */ + barrier(); + trace_osnoise_callback_enabled = true; + + retval = start_per_cpu_kthreads(tr); + if (retval) { + unhook_irq_events(); + return retval; + } + + osnoise_busy = true; + + return 0; +} + +static void osnoise_tracer_start(struct trace_array *tr) +{ + int retval; + + if (osnoise_busy) + return; + + retval = __osnoise_tracer_start(tr); + if (retval) + pr_err(BANNER "Error starting osnoise tracer\n"); + +} + +static void osnoise_tracer_stop(struct trace_array *tr) +{ + if (!osnoise_busy) + return; + + trace_osnoise_callback_enabled = false; + barrier(); + + stop_per_cpu_kthreads(); + + unhook_irq_events(); + unhook_softirq_events(); + unhook_thread_events(); + + osnoise_busy = false; +} + +static int osnoise_tracer_init(struct trace_array *tr) +{ + + /* Only allow one instance to enable this */ + if (osnoise_busy) + return -EBUSY; + + osnoise_trace = tr; + tr->max_latency = 0; + + osnoise_tracer_start(tr); + + return 0; +} + +static void osnoise_tracer_reset(struct trace_array *tr) +{ + osnoise_tracer_stop(tr); +} + +static struct tracer osnoise_tracer __read_mostly = { + .name = "osnoise", + .init = osnoise_tracer_init, + .reset = osnoise_tracer_reset, + .start = osnoise_tracer_start, + .stop = osnoise_tracer_stop, + .print_header = print_osnoise_headers, + .allow_instances = true, +}; + +#ifdef CONFIG_TIMERLAT_TRACER +static void timerlat_tracer_start(struct trace_array *tr) +{ + int retval; + + if (osnoise_busy) + return; + + osnoise_data.timerlat_tracer = 1; + + retval = __osnoise_tracer_start(tr); + if (retval) + goto out_err; + + return; +out_err: + pr_err(BANNER "Error starting timerlat tracer\n"); +} + +static void timerlat_tracer_stop(struct trace_array *tr) +{ + int cpu; + + if (!osnoise_busy) + return; + + for_each_online_cpu(cpu) + per_cpu(per_cpu_osnoise_var, cpu).sampling = 0; + + osnoise_tracer_stop(tr); + + osnoise_data.timerlat_tracer = 0; +} + +static int timerlat_tracer_init(struct trace_array *tr) +{ + /* Only allow one instance to enable this */ + if (osnoise_busy) + return -EBUSY; + + osnoise_trace = tr; + + tr->max_latency = 0; + + timerlat_tracer_start(tr); + + return 0; +} + +static void timerlat_tracer_reset(struct trace_array *tr) +{ + timerlat_tracer_stop(tr); +} + +static struct tracer timerlat_tracer __read_mostly = { + .name = "timerlat", + .init = timerlat_tracer_init, + .reset = timerlat_tracer_reset, + .start = timerlat_tracer_start, + .stop = timerlat_tracer_stop, + .print_header = print_timerlat_headers, + .allow_instances = true, +}; +#endif /* CONFIG_TIMERLAT_TRACER */ + +__init static int init_osnoise_tracer(void) +{ + int ret; + + mutex_init(&interface_lock); + + cpumask_copy(&osnoise_cpumask, cpu_all_mask); + + ret = register_tracer(&osnoise_tracer); + if (ret) { + pr_err(BANNER "Error registering osnoise!\n"); + return ret; + } + +#ifdef CONFIG_TIMERLAT_TRACER + ret = register_tracer(&timerlat_tracer); + if (ret) { + pr_err(BANNER "Error registering timerlat\n"); + return ret; + } +#endif + osnoise_init_hotplug_support(); + + init_tracefs(); + + return 0; +} +late_initcall(init_osnoise_tracer); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index d0368a569bfa..a0bf446bb034 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -1202,7 +1202,6 @@ trace_hwlat_print(struct trace_iterator *iter, int flags, return trace_handle_return(s); } - static enum print_line_t trace_hwlat_raw(struct trace_iterator *iter, int flags, struct trace_event *event) @@ -1232,6 +1231,122 @@ static struct 
trace_event trace_hwlat_event = { .funcs = &trace_hwlat_funcs, }; +/* TRACE_OSNOISE */ +static enum print_line_t +trace_osnoise_print(struct trace_iterator *iter, int flags, + struct trace_event *event) +{ + struct trace_entry *entry = iter->ent; + struct trace_seq *s = &iter->seq; + struct osnoise_entry *field; + u64 ratio, ratio_dec; + u64 net_runtime; + + trace_assign_type(field, entry); + + /* + * compute the available % of cpu time. + */ + net_runtime = field->runtime - field->noise; + ratio = net_runtime * 10000000; + do_div(ratio, field->runtime); + ratio_dec = do_div(ratio, 100000); + + trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu", + field->runtime, + field->noise, + ratio, ratio_dec, + field->max_sample); + + trace_seq_printf(s, " %6u", field->hw_count); + trace_seq_printf(s, " %6u", field->nmi_count); + trace_seq_printf(s, " %6u", field->irq_count); + trace_seq_printf(s, " %6u", field->softirq_count); + trace_seq_printf(s, " %6u", field->thread_count); + + trace_seq_putc(s, '\n'); + + return trace_handle_return(s); +} + +static enum print_line_t +trace_osnoise_raw(struct trace_iterator *iter, int flags, + struct trace_event *event) +{ + struct osnoise_entry *field; + struct trace_seq *s = &iter->seq; + + trace_assign_type(field, iter->ent); + + trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n", + field->runtime, + field->noise, + field->max_sample, + field->hw_count, + field->nmi_count, + field->irq_count, + field->softirq_count, + field->thread_count); + + return trace_handle_return(s); +} + +static struct trace_event_functions trace_osnoise_funcs = { + .trace = trace_osnoise_print, + .raw = trace_osnoise_raw, +}; + +static struct trace_event trace_osnoise_event = { + .type = TRACE_OSNOISE, + .funcs = &trace_osnoise_funcs, +}; + +/* TRACE_TIMERLAT */ +static enum print_line_t +trace_timerlat_print(struct trace_iterator *iter, int flags, + struct trace_event *event) +{ + struct trace_entry *entry = iter->ent; + struct trace_seq *s = &iter->seq; + struct timerlat_entry *field; + + trace_assign_type(field, entry); + + trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n", + field->seqnum, + field->context ? 
"thread" : "irq", + field->timer_latency); + + return trace_handle_return(s); +} + +static enum print_line_t +trace_timerlat_raw(struct trace_iterator *iter, int flags, + struct trace_event *event) +{ + struct timerlat_entry *field; + struct trace_seq *s = &iter->seq; + + trace_assign_type(field, iter->ent); + + trace_seq_printf(s, "%u %d %llu\n", + field->seqnum, + field->context, + field->timer_latency); + + return trace_handle_return(s); +} + +static struct trace_event_functions trace_timerlat_funcs = { + .trace = trace_timerlat_print, + .raw = trace_timerlat_raw, +}; + +static struct trace_event trace_timerlat_event = { + .type = TRACE_TIMERLAT, + .funcs = &trace_timerlat_funcs, +}; + /* TRACE_BPUTS */ static enum print_line_t trace_bputs_print(struct trace_iterator *iter, int flags, @@ -1442,6 +1557,8 @@ static struct trace_event *events[] __initdata = { &trace_bprint_event, &trace_print_event, &trace_hwlat_event, + &trace_osnoise_event, + &trace_timerlat_event, &trace_raw_data_event, &trace_func_repeats_event, NULL diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index e5778d1d7a5b..2402de520eca 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -26,9 +26,9 @@ static struct task_struct *wakeup_task; static int wakeup_cpu; static int wakeup_current_cpu; static unsigned wakeup_prio = -1; -static int wakeup_rt; -static int wakeup_dl; -static int tracing_dl = 0; +static bool wakeup_rt; +static bool wakeup_dl; +static bool tracing_dl; static arch_spinlock_t wakeup_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; @@ -498,7 +498,7 @@ static void __wakeup_reset(struct trace_array *tr) { wakeup_cpu = -1; wakeup_prio = -1; - tracing_dl = 0; + tracing_dl = false; if (wakeup_task) put_task_struct(wakeup_task); @@ -572,9 +572,9 @@ probe_wakeup(void *ignore, struct task_struct *p) * another task until the first one wakes up. */ if (dl_task(p)) - tracing_dl = 1; + tracing_dl = true; else - tracing_dl = 0; + tracing_dl = false; wakeup_task = get_task_struct(p); @@ -685,8 +685,8 @@ static int wakeup_tracer_init(struct trace_array *tr) if (wakeup_busy) return -EBUSY; - wakeup_dl = 0; - wakeup_rt = 0; + wakeup_dl = false; + wakeup_rt = false; return __wakeup_tracer_init(tr); } @@ -695,8 +695,8 @@ static int wakeup_rt_tracer_init(struct trace_array *tr) if (wakeup_busy) return -EBUSY; - wakeup_dl = 0; - wakeup_rt = 1; + wakeup_dl = false; + wakeup_rt = true; return __wakeup_tracer_init(tr); } @@ -705,8 +705,8 @@ static int wakeup_dl_tracer_init(struct trace_array *tr) if (wakeup_busy) return -EBUSY; - wakeup_dl = 1; - wakeup_rt = 0; + wakeup_dl = true; + wakeup_rt = false; return __wakeup_tracer_init(tr); } diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 9f478d29b926..976bf8ce8039 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -273,7 +273,8 @@ static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func * Add the probe function to a tracepoint. 
*/ static int tracepoint_add_func(struct tracepoint *tp, - struct tracepoint_func *func, int prio) + struct tracepoint_func *func, int prio, + bool warn) { struct tracepoint_func *old, *tp_funcs; int ret; @@ -288,7 +289,7 @@ static int tracepoint_add_func(struct tracepoint *tp, lockdep_is_held(&tracepoints_mutex)); old = func_add(&tp_funcs, func, prio); if (IS_ERR(old)) { - WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); + WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM); return PTR_ERR(old); } @@ -344,6 +345,32 @@ static int tracepoint_remove_func(struct tracepoint *tp, } /** + * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority + * @tp: tracepoint + * @probe: probe handler + * @data: tracepoint data + * @prio: priority of this function over other registered functions + * + * Same as tracepoint_probe_register_prio() except that it will not warn + * if the tracepoint is already registered. + */ +int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, + void *data, int prio) +{ + struct tracepoint_func tp_func; + int ret; + + mutex_lock(&tracepoints_mutex); + tp_func.func = probe; + tp_func.data = data; + tp_func.prio = prio; + ret = tracepoint_add_func(tp, &tp_func, prio, false); + mutex_unlock(&tracepoints_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist); + +/** * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority * @tp: tracepoint * @probe: probe handler @@ -366,7 +393,7 @@ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, tp_func.func = probe; tp_func.data = data; tp_func.prio = prio; - ret = tracepoint_add_func(tp, &tp_func, prio); + ret = tracepoint_add_func(tp, &tp_func, prio, true); mutex_unlock(&tracepoints_mutex); return ret; } diff --git a/kernel/ucount.c b/kernel/ucount.c index 8d8874f1c35e..87799e2379bd 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -8,6 +8,12 @@ #include <linux/kmemleak.h> #include <linux/user_namespace.h> +struct ucounts init_ucounts = { + .ns = &init_user_ns, + .uid = GLOBAL_ROOT_UID, + .count = ATOMIC_INIT(1), +}; + #define UCOUNTS_HASHTABLE_BITS 10 static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)]; static DEFINE_SPINLOCK(ucounts_lock); @@ -78,6 +84,10 @@ static struct ctl_table user_table[] = { UCOUNT_ENTRY("max_fanotify_groups"), UCOUNT_ENTRY("max_fanotify_marks"), #endif + { }, + { }, + { }, + { }, { } }; #endif /* CONFIG_SYSCTL */ @@ -129,7 +139,24 @@ static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struc return NULL; } -static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) +static void hlist_add_ucounts(struct ucounts *ucounts) +{ + struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid); + spin_lock_irq(&ucounts_lock); + hlist_add_head(&ucounts->node, hashent); + spin_unlock_irq(&ucounts_lock); +} + +struct ucounts *get_ucounts(struct ucounts *ucounts) +{ + if (ucounts && atomic_add_negative(1, &ucounts->count)) { + put_ucounts(ucounts); + ucounts = NULL; + } + return ucounts; +} + +struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) { struct hlist_head *hashent = ucounts_hashentry(ns, uid); struct ucounts *ucounts, *new; @@ -145,7 +172,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) new->ns = ns; new->uid = uid; - new->count = 0; + atomic_set(&new->count, 1); spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); @@ -153,40 +180,35 @@ static struct ucounts 
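The new tracepoint_probe_register_prio_may_exist() above differs from tracepoint_probe_register_prio() only in passing warn=false down to tracepoint_add_func(), so a probe that is already registered simply returns an error to the caller instead of also firing WARN_ON_ONCE(). A stand-alone sketch of that "warn only when unexpected" pattern (not part of the patch; the error value and the warning macro are simplified stand-ins):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define WARN_ON_ONCE(cond) \
	do { if (cond) fprintf(stderr, "WARNING: %s\n", #cond); } while (0)

/* Stand-in for func_add(): pretend the probe is already on the list. */
static int func_add(void)
{
	return -EEXIST;
}

/* Mirrors the tracepoint_add_func() change: warn only if the caller asked to. */
static int add_func(bool warn)
{
	int err = func_add();

	if (err) {
		/* -ENOMEM is always tolerated silently; other errors warn,
		   unless this is the *_may_exist path (warn == false). */
		WARN_ON_ONCE(warn && err != -ENOMEM);
		return err;
	}
	return 0;
}

int main(void)
{
	printf("register_prio:           %d\n", add_func(true));	/* warns  */
	printf("register_prio_may_exist: %d\n", add_func(false));	/* silent */
	return 0;
}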
*get_ucounts(struct user_namespace *ns, kuid_t uid) kfree(new); } else { hlist_add_head(&new->node, hashent); - ucounts = new; + spin_unlock_irq(&ucounts_lock); + return new; } } - if (ucounts->count == INT_MAX) - ucounts = NULL; - else - ucounts->count += 1; spin_unlock_irq(&ucounts_lock); + ucounts = get_ucounts(ucounts); return ucounts; } -static void put_ucounts(struct ucounts *ucounts) +void put_ucounts(struct ucounts *ucounts) { unsigned long flags; - spin_lock_irqsave(&ucounts_lock, flags); - ucounts->count -= 1; - if (!ucounts->count) + if (atomic_dec_and_test(&ucounts->count)) { + spin_lock_irqsave(&ucounts_lock, flags); hlist_del_init(&ucounts->node); - else - ucounts = NULL; - spin_unlock_irqrestore(&ucounts_lock, flags); - - kfree(ucounts); + spin_unlock_irqrestore(&ucounts_lock, flags); + kfree(ucounts); + } } -static inline bool atomic_inc_below(atomic_t *v, int u) +static inline bool atomic_long_inc_below(atomic_long_t *v, int u) { - int c, old; - c = atomic_read(v); + long c, old; + c = atomic_long_read(v); for (;;) { if (unlikely(c >= u)) return false; - old = atomic_cmpxchg(v, c, c+1); + old = atomic_long_cmpxchg(v, c, c+1); if (likely(old == c)) return true; c = old; @@ -198,19 +220,19 @@ struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, { struct ucounts *ucounts, *iter, *bad; struct user_namespace *tns; - ucounts = get_ucounts(ns, uid); + ucounts = alloc_ucounts(ns, uid); for (iter = ucounts; iter; iter = tns->ucounts) { - int max; + long max; tns = iter->ns; max = READ_ONCE(tns->ucount_max[type]); - if (!atomic_inc_below(&iter->ucount[type], max)) + if (!atomic_long_inc_below(&iter->ucount[type], max)) goto fail; } return ucounts; fail: bad = iter; for (iter = ucounts; iter != bad; iter = iter->ns->ucounts) - atomic_dec(&iter->ucount[type]); + atomic_long_dec(&iter->ucount[type]); put_ucounts(ucounts); return NULL; @@ -220,12 +242,54 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type) { struct ucounts *iter; for (iter = ucounts; iter; iter = iter->ns->ucounts) { - int dec = atomic_dec_if_positive(&iter->ucount[type]); + long dec = atomic_long_dec_if_positive(&iter->ucount[type]); WARN_ON_ONCE(dec < 0); } put_ucounts(ucounts); } +long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v) +{ + struct ucounts *iter; + long ret = 0; + + for (iter = ucounts; iter; iter = iter->ns->ucounts) { + long max = READ_ONCE(iter->ns->ucount_max[type]); + long new = atomic_long_add_return(v, &iter->ucount[type]); + if (new < 0 || new > max) + ret = LONG_MAX; + else if (iter == ucounts) + ret = new; + } + return ret; +} + +bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v) +{ + struct ucounts *iter; + long new = -1; /* Silence compiler warning */ + for (iter = ucounts; iter; iter = iter->ns->ucounts) { + long dec = atomic_long_add_return(-v, &iter->ucount[type]); + WARN_ON_ONCE(dec < 0); + if (iter == ucounts) + new = dec; + } + return (new == 0); +} + +bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max) +{ + struct ucounts *iter; + if (get_ucounts_value(ucounts, type) > max) + return true; + for (iter = ucounts; iter; iter = iter->ns->ucounts) { + max = READ_ONCE(iter->ns->ucount_max[type]); + if (get_ucounts_value(iter, type) > max) + return true; + } + return false; +} + static __init int user_namespace_sysctl_init(void) { #ifdef CONFIG_SYSCTL @@ -241,6 +305,8 @@ static __init int user_namespace_sysctl_init(void) BUG_ON(!user_header); 
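The atomic_long_inc_below() helper above (the long-typed replacement for atomic_inc_below()) increments a counter only while it stays below a limit, retrying the compare-and-swap when another task races with it. A user-space sketch of the same loop with C11 atomics standing in for the kernel's atomic_long_t helpers (not part of the patch):

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

/* Increment *v only if its current value is below u; false once at the limit. */
static bool inc_below(atomic_long *v, long u)
{
	long c = atomic_load(v);

	for (;;) {
		if (c >= u)
			return false;
		/* On failure, c is refreshed with the value that beat us. */
		if (atomic_compare_exchange_weak(v, &c, c + 1))
			return true;
	}
}

int main(void)
{
	atomic_long count = 0;
	int i;

	for (i = 0; i < 5; i++)
		printf("inc %d: %s (count = %ld)\n", i,
		       inc_below(&count, 3) ? "ok" : "over limit",
		       (long)atomic_load(&count));
	return 0;
}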
BUG_ON(!setup_userns_sysctls(&init_user_ns)); #endif + hlist_add_ucounts(&init_ucounts); + inc_rlimit_ucounts(&init_ucounts, UCOUNT_RLIMIT_NPROC, 1); return 0; } subsys_initcall(user_namespace_sysctl_init); diff --git a/kernel/user.c b/kernel/user.c index a2478cddf536..c82399c1618a 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -98,9 +98,6 @@ static DEFINE_SPINLOCK(uidhash_lock); /* root_user.__count is 1, for init task cred */ struct user_struct root_user = { .__count = REFCOUNT_INIT(1), - .processes = ATOMIC_INIT(1), - .sigpending = ATOMIC_INIT(0), - .locked_shm = 0, .uid = GLOBAL_ROOT_UID, .ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0), }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 8d62863721b0..ef82d401dde8 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -119,9 +119,13 @@ int create_user_ns(struct cred *new) ns->owner = owner; ns->group = group; INIT_WORK(&ns->work, free_user_ns); - for (i = 0; i < UCOUNT_COUNTS; i++) { + for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) { ns->ucount_max[i] = INT_MAX; } + set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)); + set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE)); + set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING)); + set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK)); ns->ucounts = ucounts; /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */ @@ -1340,6 +1344,9 @@ static int userns_install(struct nsset *nsset, struct ns_common *ns) put_user_ns(cred->user_ns); set_cred_user_ns(cred, get_user_ns(user_ns)); + if (set_cred_ucounts(cred) < 0) + return -EINVAL; + return 0; } diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c index bb7bb3b478ab..9dae1f648713 100644 --- a/kernel/usermode_driver.c +++ b/kernel/usermode_driver.c @@ -26,7 +26,7 @@ static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *na if (IS_ERR(mnt)) return mnt; - file = file_open_root(mnt->mnt_root, mnt, name, O_CREAT | O_WRONLY, 0700); + file = file_open_root_mnt(mnt, name, O_CREAT | O_WRONLY, 0700); if (IS_ERR(file)) { mntput(mnt); return ERR_CAST(file); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 92d3bcc5a5e0..ad912511a0c0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -92,7 +92,7 @@ __setup("nmi_watchdog=", hardlockup_panic_setup); * own hardlockup detector. * * watchdog_nmi_enable/disable can be implemented to start and stop when - * softlockup watchdog threads start and stop. The arch must select the + * softlockup watchdog start and stop. The arch must select the * SOFTLOCKUP_DETECTOR Kconfig. */ int __weak watchdog_nmi_enable(unsigned int cpu) @@ -335,7 +335,7 @@ static DEFINE_PER_CPU(struct completion, softlockup_completion); static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work); /* - * The watchdog thread function - touches the timestamp. + * The watchdog feed function - touches the timestamp. * * It only runs once every sample_period seconds (4 seconds by * default) to reset the softlockup timestamp. If this gets delayed @@ -558,11 +558,7 @@ static void lockup_detector_reconfigure(void) } /* - * Create the watchdog thread infrastructure and configure the detector(s). - * - * The threads are not unparked as watchdog_allowed_mask is empty. When - * the threads are successfully initialized, take the proper locks and - * unpark the threads in the watchdog_cpumask if the watchdog is enabled. 
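With the changes above, RLIMIT-style counters (UCOUNT_RLIMIT_NPROC, UCOUNT_RLIMIT_MSGQUEUE, ...) are charged at every user-namespace level: create_user_ns() gives each new namespace its own maxima via set_rlimit_ucount_max(), and inc_rlimit_ucounts() walks the ucounts chain toward the root, returning LONG_MAX if any level exceeds its limit. A simplified, self-contained sketch of that walk with two levels and illustrative limits (not part of the patch):

#include <stdio.h>
#include <limits.h>

struct level {
	long count;		/* value charged at this namespace level      */
	long max;		/* this level's ucount_max[] for the counter  */
	struct level *parent;	/* NULL at the init_user_ns level             */
};

/* Charge v at every level; LONG_MAX means some level went over its maximum. */
static long inc_rlimit(struct level *l, long v)
{
	struct level *iter;
	long ret = 0;

	for (iter = l; iter; iter = iter->parent) {
		long new = iter->count + v;

		iter->count = new;
		if (new < 0 || new > iter->max)
			ret = LONG_MAX;
		else if (iter == l)
			ret = new;
	}
	return ret;
}

int main(void)
{
	struct level root  = { .count = 90, .max = 100, .parent = NULL  };
	struct level child = { .count = 5,  .max = 50,  .parent = &root };

	printf("charge 10 -> %ld\n", inc_rlimit(&child, 10));	/* fits: returns 15              */
	printf("charge 10 -> %ld\n", inc_rlimit(&child, 10));	/* root hits 110 > 100: LONG_MAX */
	return 0;
}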
+ * Create the watchdog infrastructure and configure the detector(s). */ static __init void lockup_detector_setup(void) { @@ -628,7 +624,7 @@ void lockup_detector_soft_poweroff(void) #ifdef CONFIG_SYSCTL -/* Propagate any changes to the watchdog threads */ +/* Propagate any changes to the watchdog infrastructure */ static void proc_watchdog_update(void) { /* Remove impossible cpus to keep sysctl output clean. */