| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2015-12-01 04:36:26 +0100 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-12-14 05:19:04 +0100 |
| commit | 26b6a3d9bb48f8b4624a62281bc2a295df3a8109 (patch) | |
| tree | 5cd9d811b9f786e28b22486c52da954b1d69ff75 /arch/powerpc/include/asm/book3s | |
| parent | powerpc/mm: Fix infinite loop in hash fault with 4K page size (diff) | |
powerpc/mm: move pte headers to book3s directory
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/include/asm/book3s')
| mode | path | lines added |
|---|---|---|
| -rw-r--r-- | arch/powerpc/include/asm/book3s/32/hash.h | 46 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/hash-4k.h | 17 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/hash-64k.h | 102 |
| -rw-r--r-- | arch/powerpc/include/asm/book3s/64/hash.h | 54 |
4 files changed, 219 insertions, 0 deletions
```diff
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
new file mode 100644
index 000000000000..264b754d65b0
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
+#define _ASM_POWERPC_BOOK3S_32_HASH_H
+#ifdef __KERNEL__
+
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
+#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
+#define _PAGE_USER	0x004	/* usermode access allowed */
+#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
+#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
+#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
+#define _PAGE_DIRTY	0x080	/* C: page changed */
+#define _PAGE_ACCESSED	0x100	/* R: page referenced */
+#define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_SPECIAL	0x800	/* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT	0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD	(~PAGE_MASK)
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES	1
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
new file mode 100644
index 000000000000..c134e809aac3
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -0,0 +1,17 @@
+/* To be include by pgtable-hash64.h only */
+
+/* PTE bits */
+#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
+#define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
+#define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
+#define _PAGE_F_SECOND	_PAGE_SECONDARY
+#define _PAGE_F_GIX	_PAGE_GROUP_IX
+#define _PAGE_SPECIAL	0x10000 /* software: special page */
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
+			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
+
+/* shift to put page number into pte */
+#define PTE_RPN_SHIFT	(17)
+
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
new file mode 100644
index 000000000000..4f4ec2ab45c9
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -0,0 +1,102 @@
+/* To be include by pgtable-hash64.h only */
+
+/* Additional PTE bits (don't change without checking asm in hash_low.S) */
+#define _PAGE_SPECIAL	0x00000400 /* software: special page */
+#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
+#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
+#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
+#define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */
+
+/* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead,
+ * we set that to be the whole sub-bits mask. The C code will only
+ * test this, so a multi-bit mask will work. For combo pages, this
+ * is equivalent as effectively, the old _PAGE_HASHPTE was an OR of
+ * all the sub bits. For real 64k pages, we now have the assembly set
+ * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap
+ * that mask. This is fine as long as the HIDX bits are never set on
+ * a PTE that isn't hashed, which is the case today.
+ *
+ * A little nit is for the huge page C code, which does the hashing
+ * in C, we need to provide which bit to use.
+ */
+#define _PAGE_HASHPTE	_PAGE_HPTE_SUB
+
+/* Note the full page bits must be in the same location as for normal
+ * 4k pages as the same assembly will be used to insert 64K pages
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not
+ */
+#define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
+#define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)
+
+/* Shift to put page number into pte.
+ *
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
+ */
+#define PTE_RPN_SHIFT	(30)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * With 64K pages on hash table, we have a special PTE format that
+ * uses a second "half" of the page table to encode sub-page information
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+	real_pte_t rpte;
+
+	rpte.pte = pte;
+	rpte.hidx = 0;
+	if (pte_val(pte) & _PAGE_COMBO) {
+		/*
+		 * Make sure we order the hidx load against the _PAGE_COMBO
+		 * check. The store side ordering is done in __hash_page_4K
+		 */
+		smp_rmb();
+		rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
+	}
+	return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+	if ((pte_val(rpte.pte) & _PAGE_COMBO))
+		return (rpte.hidx >> (index<<2)) & 0xf;
+	return (pte_val(rpte.pte) >> 12) & 0xf;
+}
+
+#define __rpte_to_pte(r)	((r).pte)
+#define __rpte_sub_valid(rpte, index) \
+	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
+
+/* Trick: we set __end to va + 64k, which happens works for
+ * a 16M page as well as we want only one iteration
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
+	do {								\
+		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
+		unsigned __split = (psize == MMU_PAGE_4K ||		\
+				    psize == MMU_PAGE_64K_AP);		\
+		shift = mmu_psize_defs[psize].shift;			\
+		for (index = 0; vpn < __end; index++,			\
+			     vpn += (1L << (shift - VPN_SHIFT))) {	\
+			if (!__split || __rpte_sub_valid(rpte, index))	\
+				do {
+
+#define pte_iterate_hashed_end() } while(0); } } while(0)
+
+#define pte_pagesize_index(mm, addr, pte)	\
+	(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+
+#define remap_4k_pfn(vma, addr, pfn, prot)				\
+	(WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :	\
+	 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,		\
+			 __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
new file mode 100644
index 000000000000..8e60d4fa434d
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_H
+#ifdef __KERNEL__
+
+/*
+ * Common bits between 4K and 64K pages in a linux-style PTE.
+ * These match the bits in the (hardware-defined) PowerPC PTE as closely
+ * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ *
+ * Note: We only support user read/write permissions. Supervisor always
+ * have full read/write to pages above PAGE_OFFSET (pages below that
+ * always use the user access permissions).
+ *
+ * We could create separate kernel read-only if we used the 3 PP bits
+ * combinations that newer processors provide but we currently don't.
+ */
+#define _PAGE_PRESENT		0x0001 /* software: pte contains a translation */
+#define _PAGE_USER		0x0002 /* matches one of the PP bits */
+#define _PAGE_BIT_SWAP_TYPE	2
+#define _PAGE_EXEC		0x0004 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED		0x0008
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
+#define _PAGE_NO_CACHE		0x0020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU		0x0040 /* W: cache write-through */
+#define _PAGE_DIRTY		0x0080 /* C: page changed */
+#define _PAGE_ACCESSED		0x0100 /* R: page referenced */
+#define _PAGE_RW		0x0200 /* software: user write access allowed */
+#define _PAGE_BUSY		0x0800 /* software: PTE & hash are busy */
+
+/* No separate kernel read-only */
+#define _PAGE_KERNEL_RW		(_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+#define _PAGE_KERNEL_RO		 _PAGE_KERNEL_RW
+
+/* Strong Access Ordering */
+#define _PAGE_SAO		(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE		0
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY	0x8
+#define _PTEIDX_GROUP_IX	0x7
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES	1
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/hash-64k.h>
+#else
+#include <asm/book3s/64/hash-4k.h>
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
```
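The sub-page encoding in hash-64k.h above is compact enough to misread: for a combo page, the second half of the page table holds sixteen 4-bit "hidx" HPTE slot numbers, one per 4K subpage, while a real 64K page keeps its single hidx in the _PAGE_F_SECOND/_PAGE_F_GIX bits. Below is a minimal standalone sketch of that extraction logic; `struct demo_rpte` and the `demo_*` helpers are hypothetical stand-ins for `real_pte_t`, `__rpte_to_hidx()` and `__rpte_sub_valid()`, with the bit positions taken from the #defines in the diff.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_COMBO      0x10000000UL  /* _PAGE_COMBO */
#define PAGE_HPTE_SUB0  0x08000000UL  /* _PAGE_HPTE_SUB0: first sub page */

struct demo_rpte {            /* simplified stand-in for real_pte_t */
	uint64_t pte;         /* primary PTE word */
	uint64_t hidx;        /* second-half word: sixteen 4-bit hidx fields */
};

/* Mirrors __rpte_to_hidx(): 4 bits per 4K subpage for combo pages,
 * otherwise the full-page hidx in the _PAGE_F_SECOND/_PAGE_F_GIX nibble. */
static unsigned long demo_rpte_to_hidx(struct demo_rpte r, unsigned long index)
{
	if (r.pte & PAGE_COMBO)
		return (r.hidx >> (index << 2)) & 0xf;
	return (r.pte >> 12) & 0xf;
}

/* Mirrors __rpte_sub_valid(): subpage i has an HPTE iff SUB0 >> i is set. */
static int demo_sub_valid(struct demo_rpte r, unsigned long index)
{
	return (r.pte & (PAGE_HPTE_SUB0 >> index)) != 0;
}

int main(void)
{
	/* Combo page with subpage 3 hashed, hidx = 0x9 for that subpage. */
	struct demo_rpte r = {
		.pte  = PAGE_COMBO | (PAGE_HPTE_SUB0 >> 3),
		.hidx = 0x9UL << (3 << 2),
	};

	printf("subpage 3 valid: %d\n", demo_sub_valid(r, 3));        /* 1 */
	printf("subpage 3 hidx : 0x%lx\n", demo_rpte_to_hidx(r, 3));  /* 0x9 */
	printf("subpage 0 valid: %d\n", demo_sub_valid(r, 0));        /* 0 */
	return 0;
}
```

One thing the standalone model omits: in the kernel, `__real_pte()` orders the hidx load against the _PAGE_COMBO check with `smp_rmb()`, paired with the store-side ordering in `__hash_page_4K`.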
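The PTE_RPN_SHIFT comment in hash-64k.h also compresses some arithmetic: with a shift of 30 in a 64-bit PTE, 34 bits remain for the real page number (RPN), which at a 64K page size covers 34 + 16 = 50 bits of physical address space, or 34 + 12 = 46 bits when _PAGE_4K_PFN marks the RPN as a 4K PFN. The bound `remap_4k_pfn()` enforces with WARN_ON follows directly; a small sketch under those assumptions (the `pfn_fits_rpn` helper is hypothetical):

```c
#include <assert.h>
#include <stdint.h>

#define PTE_RPN_SHIFT 30  /* from hash-64k.h */

/* Mirrors the guard in remap_4k_pfn(): the PFN must fit in the RPN field. */
static int pfn_fits_rpn(uint64_t pfn)
{
	return pfn < (1ULL << (64 - PTE_RPN_SHIFT));
}

int main(void)
{
	assert(64 - PTE_RPN_SHIFT == 34);        /* 34-bit RPN */
	assert(34 + 16 == 50);                   /* 64K pages: 50-bit phys space */
	assert(34 + 12 == 46);                   /* 4K PFNs: 46-bit phys space */
	assert(pfn_fits_rpn((1ULL << 34) - 1));  /* largest representable PFN */
	assert(!pfn_fits_rpn(1ULL << 34));       /* rejected, like the WARN_ON */
	return 0;
}
```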