author	Baolin Wang <baolin.wang@linux.alibaba.com>	2024-12-19 08:30:08 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-12-31 02:59:09 +0100
commit	d0e6983a6d1719738cf8d13982a68094f0a1872a (patch)
tree	3d0c0a8f384da2cbeb0e5c5c724802cfb67d2699
parent	percpu: remove intermediate variable in PERCPU_PTR() (diff)
mm: shmem: fix incorrect index alignment for within_size policy
With the shmem per-size within_size policy enabled, using the raw 'order' value to round_up() the index can lead to incorrect i_size checks, resulting in inappropriately large orders being returned. Change to round_up() the index with '1 << order' to fix this issue. Additionally, add an 'aligned_index' variable to avoid affecting the subsequent index checks.

Link: https://lkml.kernel.org/r/77d8ef76a7d3d646e9225e9af88a76549a68aab1.1734593154.git.baolin.wang@linux.alibaba.com
Fixes: e7a2ab7b3bb5 ("mm: shmem: add mTHP support for anonymous shmem")
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
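To illustrate the arithmetic, below is a minimal standalone userspace sketch; it is not part of the patch, the round_up() macro merely mirrors the kernel's power-of-two variant, and the index/order values are made up for illustration. It shows that aligning with 'order' (the exponent) yields a much smaller end index than aligning with '1 << order' (the number of base pages the mTHP folio covers).

/*
 * Illustration only: mimics the kernel's round_up() for power-of-two
 * alignments to show why the step must be '1 << order', not 'order'.
 */
#include <stdio.h>

#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long index = 0;	/* faulting page index (example value) */
	int order = 4;			/* e.g. a 64KB mTHP with 4KB base pages */

	/* Buggy alignment: rounds up to a multiple of 4 pages only. */
	unsigned long wrong = round_up(index + 1, order);

	/* Fixed alignment: rounds up to a multiple of 1 << 4 = 16 pages. */
	unsigned long right = round_up(index + 1, 1 << order);

	printf("old end index: %lu, fixed end index: %lu\n", wrong, right);
	/* i_size must cover the fixed end index for this order to be allowed. */
	return 0;
}

With these example values the old code only required i_size to cover 4 pages before allowing a 16-page folio, which is how inappropriately large orders could be returned.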
-rw-r--r--	mm/shmem.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index f6fb053ac50d..dec659e84562 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1689,6 +1689,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
+	pgoff_t aligned_index;
 	bool global_huge;
 	loff_t i_size;
 	int order;
@@ -1723,9 +1724,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	/* Allow mTHP that will be fully within i_size. */
 	order = highest_order(within_size_orders);
 	while (within_size_orders) {
-		index = round_up(index + 1, order);
+		aligned_index = round_up(index + 1, 1 << order);
 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >> PAGE_SHIFT >= index) {
+		if (i_size >> PAGE_SHIFT >= aligned_index) {
 			mask |= within_size_orders;
 			break;
 		}
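Note on the second part of the fix: as the commit message states, the rounded value is stored in the new 'aligned_index' local rather than overwriting 'index', so the caller's index is not clobbered and the i_size comparison in later iterations of the loop (which retry with smaller orders) is not skewed by an earlier rounding.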