author      Baolin Wang <baolin.wang@linux.alibaba.com>      2024-09-22 06:32:13 +0200
committer   Andrew Morton <akpm@linux-foundation.org>        2024-11-06 01:56:20 +0100
commit      d2d243df445a88c26e91eac02b041213c7a32e9e (patch)
mm: shmem: fix khugepaged activation policy for shmem
Shmem has a separate interface (different from anonymous pages) to control
huge page allocation, which means shmem THP can be enabled while anonymous
THP is disabled. In that case, however, khugepaged will not start to
collapse shmem THP, which is unreasonable.
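
For concreteness, the mismatch described above is easy to set up from
userspace via the standard THP sysfs knobs. A minimal sketch, assuming the
usual transparent_hugepage control paths (error handling trimmed; the exact
values accepted depend on the kernel configuration):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write a value to a THP sysfs knob; a missing file just means
 * THP is not configured into this kernel, so silently skip it. */
static void write_knob(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	(void)write(fd, val, strlen(val));
	close(fd);
}

int main(void)
{
	/* Disable anonymous THP globally... */
	write_knob("/sys/kernel/mm/transparent_hugepage/enabled", "never");
	/* ...while leaving shmem THP enabled. Before this fix, khugepaged
	 * was never started in this state, so existing shmem mappings were
	 * not collapsed into huge pages. */
	write_knob("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
		   "always");
	return 0;
}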
To fix this issue, call start_stop_khugepaged() to activate or deactivate
the khugepaged thread whenever the shmem mTHP sysfs interfaces are updated.
Moreover, add a new helper, shmem_hpage_pmd_enabled(), to check whether
shmem THP is enabled; this determines whether khugepaged should be
activated.
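
The diffstat below is limited to mm/shmem.c, so the khugepaged side of the
series is not shown here. As an illustration only (this is an assumption
about the companion khugepaged change, not code from this diff), the
daemon's PMD-enablement check would fold the new helper in roughly like so:

/* Hypothetical sketch: khugepaged should run if PMD-sized THP can be
 * allocated from any source, i.e. the anonymous mTHP controls as before,
 * and now also shmem via the new helper. */
static bool hugepage_pmd_enabled(void)
{
	if (test_bit(HPAGE_PMD_ORDER, &huge_anon_orders_always))
		return true;
	if (test_bit(HPAGE_PMD_ORDER, &huge_anon_orders_madvise))
		return true;
	if (test_bit(HPAGE_PMD_ORDER, &huge_anon_orders_inherit) &&
	    hugepage_global_enabled())
		return true;
	/* New: consult shmem's own controls as well. */
	if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
		return true;
	return false;
}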
Link: https://lkml.kernel.org/r/9b9c6cbc4499bf44c6455367fd9e0f6036525680.1726978977.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reported-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c | 29 +++++++++++++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index e87f5d6799a7..6ad50ba60d8e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1655,6 +1655,23 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+bool shmem_hpage_pmd_enabled(void)
+{
+	if (shmem_huge == SHMEM_HUGE_DENY)
+		return false;
+	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
+		return true;
+	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
+		return true;
+	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
+		return true;
+	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
+	    shmem_huge != SHMEM_HUGE_NEVER)
+		return true;
+
+	return false;
+}
+
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
 				loff_t write_end, bool shmem_huge_force)
@@ -5024,7 +5041,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 		struct kobj_attribute *attr, const char *buf, size_t count)
 {
 	char tmp[16];
-	int huge;
+	int huge, err;
 
 	if (count + 1 > sizeof(tmp))
 		return -EINVAL;
@@ -5048,7 +5065,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 	shmem_huge = huge;
 	if (shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
-	return count;
+
+	err = start_stop_khugepaged();
+	return err ? err : count;
 }
 
 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
@@ -5125,6 +5144,12 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
 		ret = -EINVAL;
 	}
 
+	if (ret > 0) {
+		int err = start_stop_khugepaged();
+
+		if (err)
+			ret = err;
+	}
 	return ret;
 }
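
A note on the error handling above: shmem_enabled_store() now returns
err ? err : count, so a failure to start the daemon surfaces as an error to
whoever writes the sysfs file, while success keeps the usual "bytes
consumed" return. For context, start_stop_khugepaged() lives in
mm/khugepaged.c; a rough sketch of its expected shape follows (an abridged
approximation written from memory, not part of this diff; the real function
also recomputes min_free_kbytes and wakes the daemon when work is pending):

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_pmd_enabled()) {
		/* Enabled: spawn the daemon if not already running. */
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}
	} else if (khugepaged_thread) {
		/* Disabled: stop a running daemon. */
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	mutex_unlock(&khugepaged_mutex);
	return err;
}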