author    Samuel Holland <samuel.holland@sifive.com>    2024-10-16 22:27:46 +0200
committer Palmer Dabbelt <palmer@rivosinc.com>          2024-10-24 23:12:56 +0200
commit    2e1743085887ba3f553c2bb472a75a3ff744b242 (patch)
tree      9c076c49bb87c2697d88fed12f9d274349722d85 /arch/riscv
parent    riscv: Add support for userspace pointer masking (diff)
riscv: Add support for the tagged address ABI
When pointer masking is enabled for userspace, the kernel can accept tagged pointers as arguments to some system calls. Allow this by untagging the pointers in access_ok() and the uaccess routines. The uaccess routines must perform untagging in software because U-mode and S-mode have entirely separate pointer masking configurations. In fact, hardware may not even implement pointer masking for S-mode.

Since the number of tag bits is variable, untagged_addr_remote() needs to know what PMLEN to use for the remote mm. Therefore, the pointer masking mode must be the same for all threads sharing an mm. Enforce this with a lock flag in the mm context, as x86 does for LAM. The flag gets reset in init_new_context() during fork(), as the new mm is no longer multithreaded.

Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
Tested-by: Charlie Jenkins <charlie@rivosinc.com>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20241016202814.4061541-6-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
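For context, the sketch below (not part of this patch) shows how a userspace program might opt in to the resulting ABI via prctl(). The PR_PMLEN_* fallback values are assumptions taken from the parent patch's uapi changes, and the calls only succeed on hardware implementing the Supm extension.

/*
 * Minimal userspace sketch: opt in to the tagged address ABI and query
 * the effective PMLEN. The fallback constants below are assumptions.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_PMLEN_SHIFT
#define PR_PMLEN_SHIFT          24
#define PR_PMLEN_MASK           (0x7fUL << PR_PMLEN_SHIFT)
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

int main(void)
{
        /* Request at least 7 tag bits; the kernel may round up to 7 or 16. */
        unsigned long arg = PR_TAGGED_ADDR_ENABLE | (7UL << PR_PMLEN_SHIFT);

        if (prctl(PR_SET_TAGGED_ADDR_CTRL, arg, 0, 0, 0)) {
                perror("PR_SET_TAGGED_ADDR_CTRL");
                return 1;
        }

        long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
        printf("effective PMLEN: %lu\n",
               (ctrl & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT);
        return 0;
}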
Diffstat (limited to 'arch/riscv')
-rw-r--r--   arch/riscv/include/asm/mmu.h            7
-rw-r--r--   arch/riscv/include/asm/mmu_context.h   13
-rw-r--r--   arch/riscv/include/asm/uaccess.h       43
-rw-r--r--   arch/riscv/kernel/process.c            73
4 files changed, 126 insertions, 10 deletions
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index c9e03e9da3dc..1cc90465d75b 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -26,8 +26,15 @@ typedef struct {
unsigned long exec_fdpic_loadmap;
unsigned long interp_fdpic_loadmap;
#endif
+ unsigned long flags;
+#ifdef CONFIG_RISCV_ISA_SUPM
+ u8 pmlen;
+#endif
} mm_context_t;
+/* Lock the pointer masking mode because this mm is multithreaded */
+#define MM_CONTEXT_LOCK_PMLEN 0
+
#define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK)
#define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK)
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..8c4bc49a3a0f 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
+#ifdef CONFIG_RISCV_ISA_SUPM
+ next->context.pmlen = 0;
+#endif
switch_mm(prev, next, NULL);
}
@@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_MMU
atomic_long_set(&mm->context.id, 0);
#endif
+ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
+ clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags);
return 0;
}
DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+#ifdef CONFIG_RISCV_ISA_SUPM
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+ return -1UL >> mm->context.pmlen;
+}
+#endif
+
#include <asm-generic/mmu_context.h>
#endif /* _ASM_RISCV_MMU_CONTEXT_H */
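As a side note, mm_untag_mask() above is plain shift arithmetic; this standalone snippet (illustrative only, not kernel code) prints the masks produced for the two PMLEN values supported by hardware:

/* Illustrative only: the untag mask mm_untag_mask() yields on RV64. */
#include <stdio.h>

int main(void)
{
        printf("PMLEN=7:  %#lx\n", -1UL >> 7);   /* 0x01ffffffffffffff */
        printf("PMLEN=16: %#lx\n", -1UL >> 16);  /* 0x0000ffffffffffff */
        return 0;
}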
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 72ec1d9bd3f3..fee56b0c8058 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -9,8 +9,41 @@
#define _ASM_RISCV_UACCESS_H
#include <asm/asm-extable.h>
+#include <asm/cpufeature.h>
#include <asm/pgtable.h> /* for TASK_SIZE */
+#ifdef CONFIG_RISCV_ISA_SUPM
+static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
+{
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
+ u8 pmlen = mm->context.pmlen;
+
+ /* Virtual addresses are sign-extended; physical addresses are zero-extended. */
+ if (IS_ENABLED(CONFIG_MMU))
+ return (long)(addr << pmlen) >> pmlen;
+ else
+ return (addr << pmlen) >> pmlen;
+ }
+
+ return addr;
+}
+
+#define untagged_addr(addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ (__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \
+})
+
+#define untagged_addr_remote(mm, addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ mmap_assert_locked(mm); \
+ (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
+})
+
+#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
+#else
+#define untagged_addr(addr) (addr)
+#endif
+
/*
* User space memory access functions
*/
@@ -130,7 +163,7 @@ do { \
*/
#define __get_user(x, ptr) \
({ \
- const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
long __gu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
@@ -246,7 +279,7 @@ do { \
*/
#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
__typeof__(*__gu_ptr) __val = (x); \
long __pu_err = 0; \
\
@@ -293,13 +326,13 @@ unsigned long __must_check __asm_copy_from_user(void *to,
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __asm_copy_from_user(to, from, n);
+ return __asm_copy_from_user(to, untagged_addr(from), n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return __asm_copy_to_user(to, from, n);
+ return __asm_copy_to_user(untagged_addr(to), from, n);
}
extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -314,7 +347,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
might_fault();
return access_ok(to, n) ?
- __clear_user(to, n) : n;
+ __clear_user(untagged_addr(to), n) : n;
}
#define __get_kernel_nofault(dst, src, type, err_label) \
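The shift pair in __untagged_addr_remote() is the usual sign-extension untagging trick; the self-contained sketch below, using a made-up address, shows a 16-bit tag being stripped while the low canonical bits are preserved:

/*
 * Standalone illustration of the software untagging above: shift the tag
 * bits off the top, then arithmetic-shift back so the result remains
 * sign-extended like a canonical virtual address.
 */
#include <stdio.h>

static unsigned long untag(unsigned long addr, unsigned int pmlen)
{
        return (unsigned long)((long)(addr << pmlen) >> pmlen);
}

int main(void)
{
        unsigned long tagged = 0xab003ffffffff000UL;  /* tag 0xab00 in bits 63:48 */

        /* Prints 0xab003ffffffff000 -> 0x3ffffffff000 */
        printf("%#lx -> %#lx\n", tagged, untag(tagged, 16));
        return 0;
}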
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 200d2ed64dfe..58b6482c2bf6 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -213,6 +213,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
+ /* Ensure all threads in this mm have the same pointer masking mode. */
+ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
+ set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);
+
memset(&p->thread.s, 0, sizeof(p->thread.s));
/* p->thread holds context to be restored by __switch_to() */
@@ -258,10 +262,16 @@ enum {
static bool have_user_pmlen_7;
static bool have_user_pmlen_16;
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
- unsigned long valid_mask = PR_PMLEN_MASK;
+ unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
struct thread_info *ti = task_thread_info(task);
+ struct mm_struct *mm = task->mm;
unsigned long pmm;
u8 pmlen;
@@ -276,16 +286,41 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
* in case choosing a larger PMLEN has a performance impact.
*/
pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
- if (pmlen == PMLEN_0)
+ if (pmlen == PMLEN_0) {
pmm = ENVCFG_PMM_PMLEN_0;
- else if (pmlen <= PMLEN_7 && have_user_pmlen_7)
+ } else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
+ pmlen = PMLEN_7;
pmm = ENVCFG_PMM_PMLEN_7;
- else if (pmlen <= PMLEN_16 && have_user_pmlen_16)
+ } else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
+ pmlen = PMLEN_16;
pmm = ENVCFG_PMM_PMLEN_16;
- else
+ } else {
return -EINVAL;
+ }
+
+ /*
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled, or if pointer masking
+ * is disabled for userspace.
+ */
+ if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
+ return -EINVAL;
+
+ if (!(arg & PR_TAGGED_ADDR_ENABLE))
+ pmlen = PMLEN_0;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
+ mmap_write_unlock(mm);
+ return -EBUSY;
+ }
envcfg_update_bits(task, ENVCFG_PMM, pmm);
+ mm->context.pmlen = pmlen;
+
+ mmap_write_unlock(mm);
return 0;
}
@@ -298,6 +333,10 @@ long get_tagged_addr_ctrl(struct task_struct *task)
if (is_compat_thread(ti))
return -EINVAL;
+ /*
+ * The mm context's pmlen is set only when the tagged address ABI is
+ * enabled, so the effective PMLEN must be extracted from envcfg.PMM.
+ */
switch (task->thread.envcfg & ENVCFG_PMM) {
case ENVCFG_PMM_PMLEN_7:
ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
@@ -307,6 +346,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
break;
}
+ if (task->mm->context.pmlen)
+ ret |= PR_TAGGED_ADDR_ENABLE;
+
return ret;
}
@@ -316,6 +358,24 @@ static bool try_to_set_pmm(unsigned long value)
return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+};
+
static int __init tagged_addr_init(void)
{
if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
@@ -329,6 +389,9 @@ static int __init tagged_addr_init(void)
have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+
return 0;
}
core_initcall(tagged_addr_init);
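One visible consequence of the MM_CONTEXT_LOCK_PMLEN flag set in copy_thread() is that the masking mode can no longer be changed once the mm is shared. The hypothetical program below (PR_* fallback values assumed as in the earlier sketch; hardware support for PMLEN=16 assumed; build with -pthread) would have its prctl() refused with EBUSY:

/*
 * Hypothetical demonstration of the PMLEN lock taken in copy_thread():
 * once a second thread shares the mm, requesting a different pointer
 * masking mode fails with EBUSY.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>

#ifndef PR_PMLEN_SHIFT
#define PR_PMLEN_SHIFT          24
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

static pthread_barrier_t barrier;

static void *idle_thread(void *unused)
{
        (void)unused;
        pthread_barrier_wait(&barrier);  /* keep the mm multithreaded */
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_barrier_init(&barrier, NULL, 2);
        pthread_create(&tid, NULL, idle_thread, NULL);

        /* MM_CONTEXT_LOCK_PMLEN is now set, so changing PMLEN is refused. */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                  PR_TAGGED_ADDR_ENABLE | (16UL << PR_PMLEN_SHIFT), 0, 0, 0))
                printf("prctl failed: %s\n", strerror(errno));  /* EBUSY expected */

        pthread_barrier_wait(&barrier);
        pthread_join(tid, NULL);
        return 0;
}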