author    | Andy Lutomirski <luto@kernel.org>     | 2019-05-08 12:02:22 +0200
committer | Thomas Gleixner <tglx@linutronix.de>  | 2019-06-22 11:38:52 +0200
commit    | 1ab5f3f7fe3d7548b4361b68c1fed140c6841af9 (patch)
tree      | b7750b7eb78f164a5c9874fcf3cd21e5194af92d /arch/x86/kernel/process_64.c
parent    | x86/fsgsbase/64: Enable FSGSBASE instructions in helper functions (diff)
download  | linux-1ab5f3f7fe3d7548b4361b68c1fed140c6841af9.tar.xz linux-1ab5f3f7fe3d7548b4361b68c1fed140c6841af9.zip
x86/process/64: Use FSGSBASE in switch_to() if available
With the new FSGSBASE instructions, FSBASE and GSBASE can be efficiently read
and written in __switch_to(). Use that capability to preserve the full
state.
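For reference, the helpers used in the patch below are thin inline-asm wrappers around the new instructions. A minimal sketch of the FS side (modeled loosely on arch/x86/include/asm/fsgsbase.h, not a verbatim copy of the in-tree code):

/*
 * Sketch only: the real helpers live in arch/x86/include/asm/fsgsbase.h
 * and may differ in detail.
 */
static __always_inline unsigned long rdfsbase(void)
{
	unsigned long fsbase;

	/* RDFSBASE reads the current FS base directly, without an MSR access. */
	asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory");

	return fsbase;
}

static __always_inline void wrfsbase(unsigned long fsbase)
{
	/* WRFSBASE writes the FS base without touching the FS selector. */
	asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory");
}

The GS side is slightly more involved: while in the kernel, the user's GS base sits in the "inactive" slot behind SWAPGS, which is why the patch uses __rdgsbase_inactive()/__wrgsbase_inactive() from the parent change rather than plain rdgsbase()/wrgsbase().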
This will enable user code to do whatever it wants with the new
instructions without any kernel-induced gotchas. (There can still be
architectural gotchas: movl %gs,%eax; movl %eax,%gs may change GSBASE if
WRGSBASE was used, but users are expected to read the CPU manual before
doing things like that.)
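To make that gotcha concrete, here is a hypothetical user-space illustration (assumptions: the CPU has FSGSBASE, the kernel has enabled it for user space via CR4.FSGSBASE so WRGSBASE does not fault, and the code is built with gcc -mfsgsbase):

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	unsigned int sel;

	/* Set an arbitrary (canonical) GS base with WRGSBASE. */
	_writegsbase_u64(0x12345000ULL);

	/*
	 * The selector round-trip from the changelog: reloading %gs refetches
	 * the base from the descriptor, so the WRGSBASE value may be lost
	 * (the exact behavior depends on the CPU and the selector value).
	 */
	asm volatile("movl %%gs, %0\n\t"
		     "movl %0, %%gs" : "=r" (sel) : : "memory");

	/* Typically no longer prints 0x12345000. */
	printf("GSBASE after selector reload: %#llx\n",
	       (unsigned long long)_readgsbase_u64());
	return 0;
}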
This is a considerable speedup. It seems to save about 100 cycles
per context switch compared to the baseline 4.6-rc1 behavior on a
Skylake laptop.
[ chang: A 5~10% performance improvement was seen with a context-switch
  benchmark that ran threads with different FS/GSBASE values, relative to
  the 4.16 baseline. Minor edit to the changelog. ]
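A benchmark of that flavor can be approximated as below (a hypothetical sketch, not the benchmark used for the numbers above): two threads pinned to the same CPU, each with its own GSBASE, forcing context switches by ping-ponging a byte over pipes. Build with gcc -O2 -pthread -mfsgsbase; the _writegsbase_u64() calls need a kernel with FSGSBASE enabled for user space and can simply be dropped to measure plain switches.

#define _GNU_SOURCE
#include <immintrin.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define ITERS 100000

static int ping[2], pong[2];	/* pipes used to force context switches */

static void pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}

static void *partner(void *arg)
{
	char c;

	pin_to_cpu0();
	_writegsbase_u64(0x2000);	/* distinct GSBASE for this thread */

	for (int i = 0; i < ITERS; i++) {
		read(ping[0], &c, 1);	/* sleep until main pokes us */
		write(pong[1], &c, 1);	/* poke main back, forcing a switch */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec a, b;
	char c = 'x';

	pipe(ping);
	pipe(pong);
	pin_to_cpu0();
	_writegsbase_u64(0x1000);	/* distinct GSBASE for the main thread */
	pthread_create(&t, NULL, partner, NULL);

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (int i = 0; i < ITERS; i++) {
		write(ping[1], &c, 1);
		read(pong[0], &c, 1);
	}
	clock_gettime(CLOCK_MONOTONIC, &b);
	pthread_join(t, NULL);

	/* Each iteration is one round trip, i.e. two context switches. */
	printf("%.0f ns per switch pair\n",
	       ((b.tv_sec - a.tv_sec) * 1e9 + (b.tv_nsec - a.tv_nsec)) / ITERS);
	return 0;
}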
[ tglx: Massage changelog ]
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Link: https://lkml.kernel.org/r/1557309753-24073-8-git-send-email-chang.seok.bae@intel.com
Diffstat
-rw-r--r-- | arch/x86/kernel/process_64.c | 34 |
1 file changed, 28 insertions, 6 deletions
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c34ee0f72378..59013f480b86 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -244,8 +244,18 @@ static __always_inline void save_fsgs(struct task_struct *task)
 {
 	savesegment(fs, task->thread.fsindex);
 	savesegment(gs, task->thread.gsindex);
-	save_base_legacy(task, task->thread.fsindex, FS);
-	save_base_legacy(task, task->thread.gsindex, GS);
+	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+		/*
+		 * If FSGSBASE is enabled, we can't make any useful guesses
+		 * about the base, and user code expects us to save the current
+		 * value. Fortunately, reading the base directly is efficient.
+		 */
+		task->thread.fsbase = rdfsbase();
+		task->thread.gsbase = __rdgsbase_inactive();
+	} else {
+		save_base_legacy(task, task->thread.fsindex, FS);
+		save_base_legacy(task, task->thread.gsindex, GS);
+	}
 }
 
 #if IS_ENABLED(CONFIG_KVM)
@@ -324,10 +334,22 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
 static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
 					      struct thread_struct *next)
 {
-	load_seg_legacy(prev->fsindex, prev->fsbase,
-			next->fsindex, next->fsbase, FS);
-	load_seg_legacy(prev->gsindex, prev->gsbase,
-			next->gsindex, next->gsbase, GS);
+	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
+		/* Update the FS and GS selectors if they could have changed. */
+		if (unlikely(prev->fsindex || next->fsindex))
+			loadseg(FS, next->fsindex);
+		if (unlikely(prev->gsindex || next->gsindex))
+			loadseg(GS, next->gsindex);
+
+		/* Update the bases. */
+		wrfsbase(next->fsbase);
+		__wrgsbase_inactive(next->gsbase);
+	} else {
+		load_seg_legacy(prev->fsindex, prev->fsbase,
+				next->fsindex, next->fsbase, FS);
+		load_seg_legacy(prev->gsindex, prev->gsbase,
+				next->gsindex, next->gsbase, GS);
+	}
 }
 
 static unsigned long x86_fsgsbase_read_task(struct task_struct *task,