author:    Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 22:20:41 +0100
committer: Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 22:20:41 +0100
commit:    08179b47e1fdf288e5d59f90e5ce31513bb019c3 (patch)
tree:      cdf5dc1eecca783234eba22624716d083b62717b /arch/parisc/include/asm/pgalloc.h
parent:    Merge tag 'mips_5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/l... (diff)
parent:    binfmt_misc: pass binfmt_misc flags to the interpreter (diff)
Merge branch 'parisc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
- Optimize parisc page table locks by using the existing page_table_lock (a brief sketch of this change follows the list)
- Export argv0-preserve flag in binfmt_misc for usage in qemu-user
- Fix interrupt table (IVT) checksum so firmware will call the crash handler (HPMC)
- Increase IRQ stack to 64 KB on the 64-bit kernel
- Switch to the common devmem_is_allowed() implementation
- Minor fix to get_wchan()
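The first item is the one that touches this file. As a rough sketch of what "using the existing page_table_lock" means (illustration only, built from names visible on this page: pgd_spinlock() is the per-pgd lock helper the series removes, while parisc_ptl_lock() is a hypothetical wrapper invented here for clarity):

```c
#include <linux/mm_types.h>	/* struct mm_struct and its page_table_lock */
#include <linux/spinlock.h>

/*
 * Hypothetical wrapper, for illustration only: it shows which lock the
 * parisc page-table update paths take before and after this series.
 */
static inline void parisc_ptl_lock(struct mm_struct *mm)
{
	/*
	 * Before: each pgd carried a private spinlock stored next to it,
	 * looked up through the now-removed helper:
	 *
	 *	spin_lock(pgd_spinlock(mm->pgd));
	 */

	/* After: reuse the lock that generic mm code already maintains. */
	spin_lock(&mm->page_table_lock);
}
```

Reusing the existing lock is what lets pgd_alloc() in the diff below drop its spin_lock_init() call, along with the hybrid L2/L3 trick of allocating the first pmd adjacent to the pgd.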
* 'parisc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
binfmt_misc: pass binfmt_misc flags to the interpreter
parisc: Optimize per-pagetable spinlocks
parisc: Replace test_ti_thread_flag() with test_tsk_thread_flag()
parisc: Bump 64-bit IRQ stack size to 64 KB
parisc: Fix IVT checksum calculation wrt HPMC
parisc: Use the generic devmem_is_allowed()
parisc: Drop out of get_wchan() if task is running again
Diffstat (limited to 'arch/parisc/include/asm/pgalloc.h')
-rw-r--r-- | arch/parisc/include/asm/pgalloc.h | 76
1 file changed, 18 insertions(+), 58 deletions(-)
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index a6482b2ce0ea..dda557085311 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -15,47 +15,23 @@
 #define __HAVE_ARCH_PGD_FREE
 #include <asm-generic/pgalloc.h>
 
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+/* Allocate the top level pgd (page directory) */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
-					       PGD_ALLOC_ORDER);
-	pgd_t *actual_pgd = pgd;
+	pgd_t *pgd;
 
-	if (likely(pgd != NULL)) {
-		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
-		actual_pgd += PTRS_PER_PGD;
-		/* Populate first pmd with allocated memory. We mark it
-		 * with PxD_FLAG_ATTACHED as a signal to the system that this
-		 * pmd entry may not be cleared. */
-		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
-					PxD_FLAG_VALID |
-					PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
-		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
-		 * a signal that this pmd may not be freed */
-		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
-#endif
-	}
-	spin_lock_init(pgd_spinlock(actual_pgd));
-	return actual_pgd;
+	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (unlikely(pgd == NULL))
+		return NULL;
+
+	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	pgd -= PTRS_PER_PGD;
-#endif
-	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+	free_pages((unsigned long)pgd, PGD_ORDER);
 }
 
 #if CONFIG_PGTABLE_LEVELS == 3
@@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	pmd_t *pmd;
+
+	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	if (likely(pmd))
+		memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
+	return pmd;
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
-		/*
-		 * This is the permanent pmd attached to the pgd;
-		 * cannot free it.
-		 * Increment the counter to compensate for the decrement
-		 * done by generic mm code.
-		 */
-		mm_inc_nr_pmds(mm);
-		return;
-	}
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
-
 #endif
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	/* preserve the gateway marker if this is the beginning of
-	 * the permanent pmd */
-	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
-				PxD_FLAG_VALID |
-				PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
-	else
-#endif
-		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
+	set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+		+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
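Collected from the `+` lines above for readability, the post-patch pgd path reduces to a plain zeroed allocation (pgd_t, PGD_ORDER and the page-allocator helpers all come from the surrounding kernel headers):

```c
/* Post-patch pgd_alloc()/pgd_free(), assembled verbatim from the diff
 * above. The per-pgd spinlock and the "attached" first pmd are gone; a
 * pgd is now just a zeroed block of 2^PGD_ORDER pages. */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (unlikely(pgd == NULL))
		return NULL;

	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);

	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}
```

Note the switch from PGD_ALLOC_ORDER to PGD_ORDER, consistent with no longer over-allocating the pgd to make room for the adjacent first pmd that the removed hybrid L2/L3 comment described.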