Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h  |  2
-rw-r--r--  arch/riscv/include/asm/cache.h               | 14
-rw-r--r--  arch/riscv/include/asm/cacheflush.h          |  2
-rw-r--r--  arch/riscv/include/asm/cfi.h                 | 22
-rw-r--r--  arch/riscv/include/asm/elf.h                 | 13
-rw-r--r--  arch/riscv/include/asm/hwcap.h               | 17
-rw-r--r--  arch/riscv/include/asm/insn.h                | 10
-rw-r--r--  arch/riscv/include/asm/mmu.h                 |  4
-rw-r--r--  arch/riscv/include/asm/pgtable.h             | 33
-rw-r--r--  arch/riscv/include/asm/processor.h           | 52
-rw-r--r--  arch/riscv/include/asm/syscall.h             |  5
-rw-r--r--  arch/riscv/include/asm/syscall_wrapper.h     | 87
12 files changed, 239 insertions, 22 deletions
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index b8c55fb3ab2c..721ec275ce57 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -146,7 +146,7 @@
  * vendor_id: The CPU vendor ID.
  * patch_id: The patch ID (erratum ID or cpufeature ID).
  * CONFIG_k: The Kconfig of this patch ID. When Kconfig is disabled, the old
- *	     content will alwyas be executed.
+ *	     content will always be executed.
  */
 #define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \
 	_ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k)
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
index d3036df23ccb..2174fe7bac9a 100644
--- a/arch/riscv/include/asm/cache.h
+++ b/arch/riscv/include/asm/cache.h
@@ -13,6 +13,7 @@
 
 #ifdef CONFIG_RISCV_DMA_NONCOHERENT
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN	(8)
 #endif
 
 /*
@@ -23,4 +24,17 @@
 #define ARCH_SLAB_MINALIGN	16
 #endif
 
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+extern int dma_cache_alignment;
+#define dma_get_cache_alignment dma_get_cache_alignment
+static inline int dma_get_cache_alignment(void)
+{
+	return dma_cache_alignment;
+}
+#endif
+
+#endif	/* __ASSEMBLY__ */
+
 #endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index c4dca559bb97..3cb53c4df27c 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -58,8 +58,10 @@ void riscv_init_cbo_blocksizes(void);
 
 #ifdef CONFIG_RISCV_DMA_NONCOHERENT
 void riscv_noncoherent_supported(void);
+void __init riscv_set_dma_cache_alignment(void);
 #else
 static inline void riscv_noncoherent_supported(void) {}
+static inline void riscv_set_dma_cache_alignment(void) {}
 #endif
 
 /*
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
new file mode 100644
index 000000000000..56bf9d69d5e3
--- /dev/null
+++ b/arch/riscv/include/asm/cfi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_CFI_H
+#define _ASM_RISCV_CFI_H
+
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/cfi.h>
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#else
+static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+{
+	return BUG_TRAP_TYPE_NONE;
+}
+#endif /* CONFIG_CFI_CLANG */
+
+#endif /* _ASM_RISCV_CFI_H */
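
A minimal sketch (not part of this patch) of how a breakpoint trap handler could consume handle_cfi_failure(); the die() call and the fixed 4-byte instruction skip are assumptions for illustration:

	/* Sketch: dispatching a CFI trap. BUG_TRAP_TYPE_WARN means the
	 * failure was reported and execution may continue past the
	 * trapping instruction; anything else is treated as fatal here.
	 */
	static void report_cfi_trap(struct pt_regs *regs)
	{
		switch (handle_cfi_failure(regs)) {
		case BUG_TRAP_TYPE_WARN:
			regs->epc += 4;	/* assumes an uncompressed trapping insn */
			break;
		default:
			die(regs, "kernel CFI failure");
		}
	}
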
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index c24280774caf..b3b2dfbdf945 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -41,6 +41,7 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
 #define compat_elf_check_arch	compat_elf_check_arch
 
 #define CORE_DUMP_USE_REGSET
+#define ELF_FDPIC_CORE_EFLAGS	0
 #define ELF_EXEC_PAGESIZE	(PAGE_SIZE)
 
 /*
@@ -49,7 +50,7 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
  * the loader. We need to make sure that it is out of the way of the program
  * that it will "exec", and that there is sufficient room for the brk.
  */
-#define ELF_ET_DYN_BASE		((TASK_SIZE / 3) * 2)
+#define ELF_ET_DYN_BASE		((DEFAULT_MAP_WINDOW / 3) * 2)
 
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_COMPAT
@@ -69,6 +70,13 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
 #define ELF_HWCAP	riscv_get_elf_hwcap()
 extern unsigned long elf_hwcap;
 
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+	do { \
+		(_r)->a1 = _exec_map_addr; \
+		(_r)->a2 = _interp_map_addr; \
+		(_r)->a3 = dynamic_addr; \
+	} while (0)
+
 /*
  * This yields a string that ld.so will use to load implementation
  * specific libraries for optimization. This is more specific in
@@ -78,7 +86,6 @@ extern unsigned long elf_hwcap;
 
 #define COMPAT_ELF_PLATFORM	(NULL)
 
-#ifdef CONFIG_MMU
 #define ARCH_DLINFO						\
 do {								\
 	/*							\
@@ -115,6 +122,8 @@ do {								\
 	else							\
 		NEW_AUX_ENT(AT_IGNORE, 0);			\
 } while (0)
+
+#ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index f041bfa7f6a0..b7b58258f6c7 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -14,12 +14,17 @@
 #include <uapi/asm/hwcap.h>
 
 #define RISCV_ISA_EXT_a		('a' - 'a')
+#define RISCV_ISA_EXT_b		('b' - 'a')
 #define RISCV_ISA_EXT_c		('c' - 'a')
 #define RISCV_ISA_EXT_d		('d' - 'a')
 #define RISCV_ISA_EXT_f		('f' - 'a')
 #define RISCV_ISA_EXT_h		('h' - 'a')
 #define RISCV_ISA_EXT_i		('i' - 'a')
+#define RISCV_ISA_EXT_j		('j' - 'a')
+#define RISCV_ISA_EXT_k		('k' - 'a')
 #define RISCV_ISA_EXT_m		('m' - 'a')
+#define RISCV_ISA_EXT_p		('p' - 'a')
+#define RISCV_ISA_EXT_q		('q' - 'a')
 #define RISCV_ISA_EXT_s		('s' - 'a')
 #define RISCV_ISA_EXT_u		('u' - 'a')
 #define RISCV_ISA_EXT_v		('v' - 'a')
@@ -55,7 +60,6 @@
 #define RISCV_ISA_EXT_ZIHPM	42
 
 #define RISCV_ISA_EXT_MAX	64
-#define RISCV_ISA_EXT_NAME_LEN_MAX 32
 
 #ifdef CONFIG_RISCV_M_MODE
 #define RISCV_ISA_EXT_SxAIA	RISCV_ISA_EXT_SMAIA
@@ -70,12 +74,15 @@ unsigned long riscv_get_elf_hwcap(void);
 
 struct riscv_isa_ext_data {
-	/* Name of the extension displayed to userspace via /proc/cpuinfo */
-	char uprop[RISCV_ISA_EXT_NAME_LEN_MAX];
-	/* The logical ISA extension ID */
-	unsigned int isa_ext_id;
+	const unsigned int id;
+	const char *name;
+	const char *property;
 };
 
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
 unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
 
 #define riscv_isa_extension_mask(ext)	BIT_MASK(RISCV_ISA_EXT_##ext)
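
With the reworked riscv_isa_ext_data, per-extension metadata lives in one exported table instead of a fixed-size name buffer. A hedged sketch of a consumer, modelled on a /proc/cpuinfo-style printer; the has_extension() predicate is a hypothetical stand-in for the kernel's real per-extension bitmap test:

	#include <linux/seq_file.h>

	static bool has_extension(unsigned int id);	/* hypothetical helper */

	static void seq_print_isa_exts(struct seq_file *f)
	{
		size_t i;

		/* riscv_isa_ext[] and riscv_isa_ext_count come from the hunk above. */
		for (i = 0; i < riscv_isa_ext_count; i++) {
			const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];

			if (has_extension(ext->id))
				seq_printf(f, "_%s", ext->name);
		}
	}
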
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index fce00400c9bc..06e439eeef9a 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -63,6 +63,7 @@
 #define RVG_RS1_OPOFF		15
 #define RVG_RS2_OPOFF		20
 #define RVG_RD_OPOFF		7
+#define RVG_RS1_MASK		GENMASK(4, 0)
 #define RVG_RD_MASK		GENMASK(4, 0)
 
 /* The bit field of immediate value in RVC J instruction */
@@ -130,6 +131,7 @@
 #define RVC_C2_RS1_OPOFF	7
 #define RVC_C2_RS2_OPOFF	2
 #define RVC_C2_RD_OPOFF		7
+#define RVC_C2_RS1_MASK		GENMASK(4, 0)
 
 /* parts of opcode for RVG*/
 #define RVG_OPCODE_FENCE	0x0f
@@ -289,6 +291,10 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code)
 #define RV_X(X, s, mask)  (((X) >> (s)) & (mask))
 #define RVC_X(X, s, mask) RV_X(X, s, mask)
 
+#define RV_EXTRACT_RS1_REG(x) \
+	({typeof(x) x_ = (x); \
+	(RV_X(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); })
+
 #define RV_EXTRACT_RD_REG(x) \
 	({typeof(x) x_ = (x); \
 	(RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); })
@@ -316,6 +322,10 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code)
 	(RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \
 	(RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); })
 
+#define RVC_EXTRACT_C2_RS1_REG(x) \
+	({typeof(x) x_ = (x); \
+	(RV_X(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); })
+
 #define RVC_EXTRACT_JTYPE_IMM(x) \
 	({typeof(x) x_ = (x); \
 	(RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 0099dc116168..355504b37f8e 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -20,6 +20,10 @@ typedef struct {
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
 #endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+	unsigned long exec_fdpic_loadmap;
+	unsigned long interp_fdpic_loadmap;
+#endif
 } mm_context_t;
 
 void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2f1c0cde2ca4..b2ba3f79cfe9 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -62,11 +62,16 @@
  * struct pages to map half the virtual address space. Then
  * position vmemmap directly below the VMALLOC region.
  */
+#define VA_BITS_SV32 32
 #ifdef CONFIG_64BIT
+#define VA_BITS_SV39 39
+#define VA_BITS_SV48 48
+#define VA_BITS_SV57 57
+
 #define VA_BITS		(pgtable_l5_enabled ? \
-				57 : (pgtable_l4_enabled ? 48 : 39))
+			VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
 #else
-#define VA_BITS		32
+#define VA_BITS		VA_BITS_SV32
 #endif
 
 #define VMEMMAP_SHIFT \
@@ -111,11 +116,27 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
+#include <asm/compat.h>
 
 #define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
 
 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
+
+#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
+#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
+#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
+
+#ifdef CONFIG_COMPAT
+#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
+#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
+#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
+#else
+#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
+#endif /* CONFIG_COMPAT */
+
 #else
 #include <asm/pgtable-32.h>
 #endif /* CONFIG_64BIT */
@@ -843,14 +864,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
  * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
  * Task size is:
- * -     0x9fc00000 (~2.5GB) for RV32.
- * -   0x4000000000 ( 256GB) for RV64 using SV39 mmu
- * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * -        0x9fc00000 (~2.5GB) for RV32.
+ * -      0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * -    0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * - 0x100000000000000 (  64PB) for RV64 using SV57 mmu
  *
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
  * Instruction Set Manual Volume II: Privileged Architecture" states that
  * "load and store effective addresses, which are 64bits, must have bits
  * 63–48 all equal to bit 47, or else a page-fault exception will occur."
+ * Similarly for SV57, bits 63–57 must be equal to bit 56.
  */
 #ifdef CONFIG_64BIT
 #define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
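
The new VA_USER_* constants are easiest to read as concrete user-space ceilings; a quick sanity-check sketch using the kernel's static_assert(), matching the TASK_SIZE values listed in the comment above:

	static_assert(VA_USER_SV39 == 0x4000000000UL);      /* 1UL << 38, 256 GiB */
	static_assert(VA_USER_SV48 == 0x800000000000UL);    /* 1UL << 47, 128 TiB */
	static_assert(VA_USER_SV57 == 0x100000000000000UL); /* 1UL << 56,  64 PiB */
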
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index c950a8d9edef..3e23e1786d05 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -13,19 +13,59 @@
 
 #include <asm/ptrace.h>
 
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW	(UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX		TASK_SIZE_64
+
+#define arch_get_mmap_end(addr, len, flags)			\
+({								\
+	unsigned long mmap_end;					\
+	typeof(addr) _addr = (addr);				\
+	if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+		mmap_end = STACK_TOP_MAX;			\
+	else if ((_addr) >= VA_USER_SV57)			\
+		mmap_end = STACK_TOP_MAX;			\
+	else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+		mmap_end = VA_USER_SV48;			\
+	else							\
+		mmap_end = VA_USER_SV39;			\
+	mmap_end;						\
+})
+
+#define arch_get_mmap_base(addr, base)				\
+({								\
+	unsigned long mmap_base;				\
+	typeof(addr) _addr = (addr);				\
+	typeof(base) _base = (base);				\
+	unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base);	\
+	if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+		mmap_base = (_base);				\
+	else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
+		mmap_base = VA_USER_SV57 - rnd_gap;		\
+	else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+		mmap_base = VA_USER_SV48 - rnd_gap;		\
+	else							\
+		mmap_base = VA_USER_SV39 - rnd_gap;		\
+	mmap_base;						\
+})
+
+#else
+#define DEFAULT_MAP_WINDOW	TASK_SIZE
+#define STACK_TOP_MAX		TASK_SIZE
+#endif
+#define STACK_ALIGN		16
+
+#define STACK_TOP		DEFAULT_MAP_WINDOW
+
 /*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
-
-#define STACK_TOP		TASK_SIZE
 #ifdef CONFIG_64BIT
-#define STACK_TOP_MAX		TASK_SIZE_64
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
 #else
-#define STACK_TOP_MAX		TASK_SIZE
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
 #endif
-#define STACK_ALIGN		16
 
 #ifndef __ASSEMBLY__
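
A userspace sketch (not from this patch) of what the arch_get_mmap_end() logic above means in practice: the mmap() hint address selects the address-space ceiling. A zero hint keeps the default window, a non-zero hint below 1UL << 47 is served from the SV39 range, and a hint at or above it can return SV48/SV57 addresses when the MMU supports them:

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		void *def = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		/* Hint at VA_USER_SV48 (1UL << 47) to opt in to the larger window. */
		void *hi  = mmap((void *)(1UL << 47), 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		printf("default hint: %p\nhigh hint:    %p\n", def, hi);
		return 0;
	}
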
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 0148c6bd9675..121fff429dce 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -75,7 +75,7 @@ static inline int syscall_get_arch(struct task_struct *task)
 #endif
 }
 
-typedef long (*syscall_t)(ulong, ulong, ulong, ulong, ulong, ulong, ulong);
+typedef long (*syscall_t)(const struct pt_regs *);
 static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
 {
 	syscall_t fn;
@@ -87,8 +87,7 @@ static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
 #endif
 	fn = sys_call_table[syscall];
 
-	regs->a0 = fn(regs->orig_a0, regs->a1, regs->a2,
-		      regs->a3, regs->a4, regs->a5, regs->a6);
+	regs->a0 = fn(regs);
 }
 
 static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..1d7942c8a6cb
--- /dev/null
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - riscv specific wrappers to syscall definitions
+ *
+ * Based on arch/arm64/include/syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#include <asm/ptrace.h>
+
+asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...)				\
+	__MAP(x,__SC_ARGS					\
+	      ,,regs->orig_a0,,regs->a1,,regs->a2		\
+	      ,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...)						\
+	asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs);		\
+	ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO);			\
+	static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));		\
+	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs)		\
+	{										\
+		return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__));	\
+	}										\
+	static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))		\
+	{										\
+		return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));	\
+	}										\
+	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname)							\
+	asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused);	\
+	ALLOW_ERROR_INJECTION(__riscv_compat_sys_##sname, ERRNO);			\
+	asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL_COMPAT(name)							\
+	asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs);	\
+	asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs)	\
+	{										\
+		return sys_ni_syscall();						\
+	}
+
+#define COMPAT_SYS_NI(name) \
+	SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...)						\
+	asmlinkage long __riscv_sys##name(const struct pt_regs *regs);		\
+	ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO);			\
+	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));		\
+	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	asmlinkage long __riscv_sys##name(const struct pt_regs *regs)		\
+	{									\
+		return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__));	\
+	}									\
+	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))		\
+	{									\
+		long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));	\
+		__MAP(x,__SC_TEST,__VA_ARGS__);					\
+		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));		\
+		return ret;							\
+	}									\
+	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define SYSCALL_DEFINE0(sname)							\
+	SYSCALL_METADATA(_##sname, 0);						\
+	asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused);	\
+	ALLOW_ERROR_INJECTION(__riscv_sys_##sname, ERRNO);			\
+	asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name)							\
+	asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs);	\
+	asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs)	\
+	{									\
+		return sys_ni_syscall();					\
+	}
+
+#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);
+
+#endif /* __ASM_SYSCALL_WRAPPER_H */
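
For illustration, roughly what SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) expands to under __SYSCALL_DEFINEx above. This is hand-expanded and simplified (SYSCALL_METADATA, error injection, and the __SC_TEST/__PROTECT checks are omitted):

	static long __se_sys_dup2(long oldfd, long newfd);
	static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd);

	/* Entry point the syscall table points at: takes only pt_regs. */
	asmlinkage long __riscv_sys_dup2(const struct pt_regs *regs)
	{
		return __se_sys_dup2(regs->orig_a0, regs->a1);
	}

	/* Shim: every argument arrives widened to a long. */
	static long __se_sys_dup2(long oldfd, long newfd)
	{
		return __do_sys_dup2((unsigned int)oldfd, (unsigned int)newfd);
	}

	/* The body written by the syscall author. */
	static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd)
	{
		/* ... original syscall body ... */
		return 0;
	}

This indirection is what lets the table in syscall.h hold uniform `long (*)(const struct pt_regs *)` pointers while keeping the per-syscall argument unpacking type-checked at compile time.
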