author      Baolin Wang <baolin.wang@linux.alibaba.com>    2024-07-22 07:43:18 +0200
committer   Andrew Morton <akpm@linux-foundation.org>      2024-09-02 05:25:44 +0200
commit      d58a2a581f132529eefac5377676011562b631b8 (patch)
tree        5f51bd4d61a1e9d82e5e420da5d35a739138d106 /mm/shmem.c
parent      mm: shmem: simplify the suitable huge orders validation for tmpfs (diff)
download    linux-d58a2a581f132529eefac5377676011562b631b8.tar.xz
            linux-d58a2a581f132529eefac5377676011562b631b8.zip
mm: shmem: rename shmem_is_huge() to shmem_huge_global_enabled()
shmem_is_huge() is now used to check whether the top-level huge page
support is globally enabled, so rename it to reflect that usage.
Link: https://lkml.kernel.org/r/da53296e0ab6359aa083561d9dc01e4223d60fbe.1721626645.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--    mm/shmem.c    15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 7889b499d33f..2a86b0d9f516 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -548,9 +548,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
-			    bool shmem_huge_force, struct mm_struct *mm,
-			    unsigned long vm_flags)
+static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+					bool shmem_huge_force, struct mm_struct *mm,
+					unsigned long vm_flags)
 {
 	loff_t i_size;
 
@@ -581,14 +581,15 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
 	}
 }
 
-bool shmem_is_huge(struct inode *inode, pgoff_t index,
+bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 		   bool shmem_huge_force, struct mm_struct *mm,
 		   unsigned long vm_flags)
 {
 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
 		return false;
 
-	return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
+	return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
+					   mm, vm_flags);
 }
 
 #if defined(CONFIG_SYSFS)
@@ -1156,7 +1157,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
 			  STATX_ATTR_NODUMP);
 	generic_fillattr(idmap, request_mask, inode, stat);
 
-	if (shmem_is_huge(inode, 0, false, NULL, 0))
+	if (shmem_huge_global_enabled(inode, 0, false, NULL, 0))
 		stat->blksize = HPAGE_PMD_SIZE;
 
 	if (request_mask & STATX_BTIME) {
@@ -2149,7 +2150,7 @@ repeat:
 		return 0;
 	}
 
-	huge = shmem_is_huge(inode, index, false, fault_mm,
+	huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
 			     vma ? vma->vm_flags : 0);
 	/* Find hugepage orders that are allowed for anonymous shmem. */
 	if (vma && vma_is_anon_shmem(vma))
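
For context, a minimal sketch (not part of the patch) of how a caller might consult the renamed helper. The wrapper name want_pmd_huge() and its placement in a fault path are assumptions made for illustration; only the shmem_huge_global_enabled() signature and argument order are taken from the diff above.

/*
 * Illustrative sketch only: a hypothetical wrapper showing how a shmem
 * fault path could ask whether the global huge-page setting allows a
 * PMD-sized folio for a given inode/index, via the renamed
 * shmem_huge_global_enabled().
 */
static bool want_pmd_huge(struct inode *inode, pgoff_t index,
			  struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;

	/*
	 * Argument order matches the diff: inode, index, shmem_huge_force,
	 * mm, vm_flags; shmem_huge_force stays false for a normal fault.
	 */
	return shmem_huge_global_enabled(inode, index, false,
					 vma ? vma->vm_mm : NULL,
					 vma ? vma->vm_flags : 0);
}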