 .mailmap        |  1 +
 fs/ocfs2/dir.c  | 25 +++++++++++++++++++++----
 lib/alloc_tag.c |  3 +++
 mm/khugepaged.c |  4 ++--
 mm/page_alloc.c |  3 +++
 mm/shmem.c      |  2 +-
 mm/zswap.c      | 42 ++++++++++++++++++++++++------------------
 7 files changed, 55 insertions(+), 25 deletions(-)
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -202,6 +202,7 @@ Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
 Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
 Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
 Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
+Ethan Carter Edwards <ethan@ethancedwards.com> Ethan Edwards <ethancarteredwards@gmail.com>
 Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@microchip.com>
 Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@collabora.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 213206ebdd58..7799f4d16ce9 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1065,26 +1065,39 @@ int ocfs2_find_entry(const char *name, int namelen,
 {
 	struct buffer_head *bh;
 	struct ocfs2_dir_entry *res_dir = NULL;
+	int ret = 0;
 
 	if (ocfs2_dir_indexed(dir))
 		return ocfs2_find_entry_dx(name, namelen, dir, lookup);
 
+	if (unlikely(i_size_read(dir) <= 0)) {
+		ret = -EFSCORRUPTED;
+		mlog_errno(ret);
+		goto out;
+	}
 	/*
 	 * The unindexed dir code only uses part of the lookup
 	 * structure, so there's no reason to push it down further
 	 * than this.
 	 */
-	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+	if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+		if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) {
+			ret = -EFSCORRUPTED;
+			mlog_errno(ret);
+			goto out;
+		}
 		bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
-	else
+	} else {
 		bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
+	}
 
 	if (bh == NULL)
 		return -ENOENT;
 
 	lookup->dl_leaf_bh = bh;
 	lookup->dl_entry = res_dir;
-	return 0;
+out:
+	return ret;
 }
 
 /*
@@ -2010,6 +2023,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
  *
  * Return 0 if the name does not exist
  * Return -EEXIST if the directory contains the name
+ * Return -EFSCORRUPTED if found corruption
  *
  * Callers should have i_rwsem + a cluster lock on dir
  */
@@ -2023,9 +2037,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
 	trace_ocfs2_check_dir_for_entry(
 			(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
 
-	if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
+	ret = ocfs2_find_entry(name, namelen, dir, &lookup);
+	if (ret == 0) {
 		ret = -EEXIST;
 		mlog_errno(ret);
+	} else if (ret == -ENOENT) {
+		ret = 0;
 	}
 
 	ocfs2_free_dir_lookup_result(&lookup);
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 7dcebf118a3e..65e706e1bc19 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -195,6 +195,9 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old)
 	union codetag_ref ref_old, ref_new;
 	struct alloc_tag *tag_old, *tag_new;
 
+	if (!mem_alloc_profiling_enabled())
+		return;
+
 	tag_old = pgalloc_tag_get(&old->page);
 	if (!tag_old)
 		return;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 653dbb1ff05c..bad1e130eda8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2422,7 +2422,7 @@ skip:
 			VM_BUG_ON(khugepaged_scan.address < hstart ||
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
-			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+			if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
 				struct file *file = get_file(vma->vm_file);
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
@@ -2768,7 +2768,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		mmap_assert_locked(mm);
 		memset(cc->node_load, 0, sizeof(cc->node_load));
 		nodes_clear(cc->alloc_nmask);
-		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+		if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
 			struct file *file = get_file(vma->vm_file);
 			pgoff_t pgoff = linear_page_index(vma, addr);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cae7b93864c2..01eab25edf89 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5692,10 +5692,13 @@ __meminit void zone_pcp_init(struct zone *zone)
 			 zone->present_pages, zone_batchsize(zone));
 }
 
+static void setup_per_zone_lowmem_reserve(void);
+
 void adjust_managed_page_count(struct page *page, long count)
 {
 	atomic_long_add(count, &page_zone(page)->managed_pages);
 	totalram_pages_add(count);
+	setup_per_zone_lowmem_reserve();
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
 
diff --git a/mm/shmem.c b/mm/shmem.c
index ac58d4fb2e6f..fdb5afa1cfe9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4368,7 +4368,7 @@ static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
 					    bool latest_version)
 {
 	struct shmem_options *ctx = fc->fs_private;
-	unsigned int version = UTF8_LATEST;
+	int version = UTF8_LATEST;
 	struct unicode_map *encoding;
 	char *version_str = param->string + 5;
 
diff --git a/mm/zswap.c b/mm/zswap.c
index 30f5a27a6862..b84c20d889b1 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -820,15 +820,15 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 {
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-	struct crypto_acomp *acomp;
-	struct acomp_req *req;
+	struct crypto_acomp *acomp = NULL;
+	struct acomp_req *req = NULL;
+	u8 *buffer = NULL;
 	int ret;
 
-	mutex_lock(&acomp_ctx->mutex);
-	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-	if (!acomp_ctx->buffer) {
+	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+	if (!buffer) {
 		ret = -ENOMEM;
-		goto buffer_fail;
+		goto fail;
 	}
 
 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
@@ -836,21 +836,25 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 		pr_err("could not alloc crypto acomp %s : %ld\n",
 				pool->tfm_name, PTR_ERR(acomp));
 		ret = PTR_ERR(acomp);
-		goto acomp_fail;
+		goto fail;
 	}
-	acomp_ctx->acomp = acomp;
-	acomp_ctx->is_sleepable = acomp_is_async(acomp);
 
-	req = acomp_request_alloc(acomp_ctx->acomp);
+	req = acomp_request_alloc(acomp);
 	if (!req) {
 		pr_err("could not alloc crypto acomp_request %s\n",
 		       pool->tfm_name);
 		ret = -ENOMEM;
-		goto req_fail;
+		goto fail;
 	}
-	acomp_ctx->req = req;
 
+	/*
+	 * Only hold the mutex after completing allocations, otherwise we may
+	 * recurse into zswap through reclaim and attempt to hold the mutex
+	 * again resulting in a deadlock.
+	 */
+	mutex_lock(&acomp_ctx->mutex);
 	crypto_init_wait(&acomp_ctx->wait);
+
 	/*
 	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
@@ -859,15 +863,17 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   crypto_req_done, &acomp_ctx->wait);
 
+	acomp_ctx->buffer = buffer;
+	acomp_ctx->acomp = acomp;
+	acomp_ctx->is_sleepable = acomp_is_async(acomp);
+	acomp_ctx->req = req;
 	mutex_unlock(&acomp_ctx->mutex);
 	return 0;
 
-req_fail:
-	crypto_free_acomp(acomp_ctx->acomp);
-acomp_fail:
-	kfree(acomp_ctx->buffer);
-buffer_fail:
-	mutex_unlock(&acomp_ctx->mutex);
+fail:
+	if (acomp)
+		crypto_free_acomp(acomp);
+	kfree(buffer);
 	return ret;
 }
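
The mm/zswap.c change above is the most instructive hunk: per its new comment, kmalloc_node() can enter reclaim, reclaim can recurse into zswap, and zswap would then try to take the same per-CPU mutex again. The fix moves every allocation ahead of the mutex, NULL-initializes the locals, and unwinds through a single fail label. Below is a minimal user-space sketch of that allocate-then-publish pattern; the names (struct ctx, ctx_setup) are hypothetical and illustrative only, not kernel APIs.

/*
 * Sketch: allocate everything first, then take the lock only to publish
 * fully constructed state. All resource pointers start out NULL so a
 * single "fail" label can unwind any partial allocation.
 */
#include <pthread.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t lock;		/* assumed initialized elsewhere */
	char *buffer;
	char *request;
};

static int ctx_setup(struct ctx *c, size_t size)
{
	char *buffer = NULL;
	char *request = NULL;

	buffer = malloc(size);		/* may "reclaim"; lock not held */
	if (!buffer)
		goto fail;

	request = malloc(size);
	if (!request)
		goto fail;

	pthread_mutex_lock(&c->lock);	/* no allocation under the lock */
	c->buffer = buffer;
	c->request = request;
	pthread_mutex_unlock(&c->lock);
	return 0;

fail:
	free(request);			/* free(NULL) is a no-op */
	free(buffer);
	return -1;
}

The final zswap hunk follows the same ordering: the acomp_ctx fields are assigned only after all three allocations have succeeded, so other CPUs never observe half-initialized state.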