mirror of https://github.com/torvalds/linux.git

20 hotfixes. 15 are cc:stable and the remainder address post-6.16 issues
or aren't considered necessary for -stable kernels.  14 of these fixes are
for MM.
 
 This includes
 
 - a 3-patch kexec series from Breno that fixes a recently introduced
   use-uninitialized bug,
 
 - a 2-patch DAMON series from Quanmin Yan that avoids div-by-zero
   crashes which can occur if the operator uses poorly-chosen insmod
   parameters.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaMI7WQAKCRDdBJ7gKXxA
 jq3sAQDkflIN0qW3R7yqgUZfdO78T2LMmGlPW1L7F/ZXkxLk7gD/WgkWoec5cqi0
 ACiL81h6btIYBLHJ+SqJuowPMhaelQg=
 =fquW
 -----END PGP SIGNATURE-----
Merge tag 'mm-hotfixes-stable-2025-09-10-20-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "20 hotfixes. 15 are cc:stable and the remainder address post-6.16
  issues or aren't considered necessary for -stable kernels. 14 of these
  fixes are for MM.
  This includes
   - kexec fixes from Breno for a recently introduced
     use-uninitialized bug
   - DAMON fixes from Quanmin Yan to avoid div-by-zero crashes
     which can occur if the operator uses poorly-chosen insmod
     parameters
   and misc singleton fixes"
* tag 'mm-hotfixes-stable-2025-09-10-20-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: add tree entry to numa memblocks and emulation block
  mm/damon/sysfs: fix use-after-free in state_show()
  proc: fix type confusion in pde_set_flags()
  compiler-clang.h: define __SANITIZE_*__ macros only when undefined
  mm/vmalloc, mm/kasan: respect gfp mask in kasan_populate_vmalloc()
  ocfs2: fix recursive semaphore deadlock in fiemap call
  mm/memory-failure: fix VM_BUG_ON_PAGE(PagePoisoned(page)) when unpoison memory
  mm/mremap: fix regression in vrm->new_addr check
  percpu: fix race on alloc failed warning limit
  mm/memory-failure: fix redundant updates for already poisoned pages
  s390: kexec: initialize kexec_buf struct
  riscv: kexec: initialize kexec_buf struct
  arm64: kexec: initialize kexec_buf struct in load_other_segments()
  mm/damon/reclaim: avoid divide-by-zero in damon_reclaim_apply_parameters()
  mm/damon/lru_sort: avoid divide-by-zero in damon_lru_sort_apply_parameters()
  mm/damon/core: set quota->charged_from to jiffies at first charge window
  mm/hugetlb: add missing hugetlb_lock in __unmap_hugepage_range()
  init/main.c: fix boot time tracing crash
  mm/memory_hotplug: fix hwpoisoned large folio handling in do_migrate_range()
  mm/khugepaged: fix the address passed to notifier on testing young
			
			
This commit is contained in:
		
						commit
						4f553c1e2c
					
				| 
						 | 
				
			
@@ -16128,6 +16128,7 @@ M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Mike Rapoport <rppt@kernel.org>
 L:	linux-mm@kvack.org
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git
 F:	include/linux/numa_memblks.h
 F:	mm/numa.c
 F:	mm/numa_emulation.c
@@ -94,7 +94,7 @@ int load_other_segments(struct kimage *image,
 			char *initrd, unsigned long initrd_len,
 			char *cmdline)
 {
-	struct kexec_buf kbuf;
+	struct kexec_buf kbuf = {};
 	void *dtb = NULL;
 	unsigned long initrd_load_addr = 0, dtb_len,
 		      orig_segments = image->nr_segments;
@@ -16,7 +16,7 @@
 static int kexec_file_add_kernel_elf(struct kimage *image,
 				     struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
 	const Elf_Ehdr *ehdr;
 	const Elf_Phdr *phdr;
 	Elf_Addr entry;
@@ -16,7 +16,7 @@
 static int kexec_file_add_kernel_image(struct kimage *image,
 				       struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
 
 	buf.image = image;
 
@@ -129,7 +129,7 @@ static int kexec_file_update_purgatory(struct kimage *image,
 static int kexec_file_add_purgatory(struct kimage *image,
 				    struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
 	int ret;
 
 	buf.image = image;
@@ -152,7 +152,7 @@ static int kexec_file_add_purgatory(struct kimage *image,
 static int kexec_file_add_initrd(struct kimage *image,
 				 struct s390_load_data *data)
 {
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
 	int ret;
 
 	buf.image = image;
@@ -184,7 +184,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
 {
 	__u32 *lc_ipl_parmblock_ptr;
 	unsigned int len, ncerts;
-	struct kexec_buf buf;
+	struct kexec_buf buf = {};
 	unsigned long addr;
 	void *ptr, *end;
 	int ret;
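The kexec fixes above all follow one pattern: a stack-allocated struct kexec_buf used to be declared without an initializer, so any member the loader did not set explicitly was read as garbage. Changing the declaration to "= {}" zero-initializes every member. Below is a minimal user-space sketch of that difference, using an invented struct buf_desc rather than the kernel's kexec_buf; the field names are placeholders for illustration only.

#include <stdio.h>

/* Illustrative stand-in for a buffer descriptor; not the kernel's kexec_buf. */
struct buf_desc {
	void *buffer;
	unsigned long bufsz;
	unsigned long mem;	/* fields a caller may forget to set */
	int top_down;
};

int main(void)
{
	struct buf_desc uninit;		/* unset members are indeterminate  */
	struct buf_desc zeroed = {};	/* every member starts out 0 / NULL */

	/* Only some fields get filled in, as the kexec loaders do. */
	uninit.buffer = "kernel";
	zeroed.buffer = "kernel";

	/* Reading uninit.mem or uninit.top_down here would be undefined
	 * behaviour; the zero-initialized copy is always safe to inspect. */
	printf("zeroed: mem=%lu top_down=%d\n", zeroed.mem, zeroed.top_down);

	(void)uninit;
	return 0;
}

The "= {}" empty-brace form is the kernel's idiom (a GNU C extension, standardized in C23); "= {0}" is the strictly portable equivalent.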
@@ -706,6 +706,8 @@ out:
  * it not only handles the fiemap for inlined files, but also deals
  * with the fast symlink, cause they have no difference for extent
  * mapping per se.
+ *
+ * Must be called with ip_alloc_sem semaphore held.
  */
 static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
 			       struct fiemap_extent_info *fieinfo,
@@ -717,6 +719,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
 	u64 phys;
 	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	lockdep_assert_held_read(&oi->ip_alloc_sem);
 
 	di = (struct ocfs2_dinode *)di_bh->b_data;
 	if (ocfs2_inode_is_fast_symlink(inode))
@@ -732,8 +735,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
 			phys += offsetof(struct ocfs2_dinode,
 					 id2.i_data.id_data);
 
+		/* Release the ip_alloc_sem to prevent deadlock on page fault */
+		up_read(&OCFS2_I(inode)->ip_alloc_sem);
 		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
 					      flags);
+		down_read(&OCFS2_I(inode)->ip_alloc_sem);
 		if (ret < 0)
 			return ret;
 	}
@@ -802,9 +808,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
 		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
 		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
-
+		/* Release the ip_alloc_sem to prevent deadlock on page fault */
+		up_read(&OCFS2_I(inode)->ip_alloc_sem);
 		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
 					      len_bytes, fe_flags);
+		down_read(&OCFS2_I(inode)->ip_alloc_sem);
 		if (ret)
 			break;
 
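The ocfs2 change drops ip_alloc_sem for the duration of fiemap_fill_next_extent(), because that helper can copy to user memory and fault, and the fault path may try to take the same semaphore again. Below is a rough user-space sketch of the same shape, with a pthread read-write lock standing in for the kernel rw_semaphore and fill_next_extent() standing in for fiemap_fill_next_extent(); none of these names are the real ocfs2 API.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for fiemap_fill_next_extent(): may need the lock itself. */
static int fill_next_extent(unsigned long virt, unsigned long len)
{
	/* e.g. a fault handler here could do pthread_rwlock_rdlock(&alloc_sem) */
	printf("extent at %#lx, len %lu\n", virt, len);
	return 0;
}

static int walk_extents(void)
{
	int ret = 0;

	pthread_rwlock_rdlock(&alloc_sem);
	for (unsigned long i = 0; i < 3 && !ret; i++) {
		unsigned long virt = i * 4096, len = 4096;

		/* Drop the lock around the callback to avoid self-deadlock,
		 * then retake it before touching the protected state again. */
		pthread_rwlock_unlock(&alloc_sem);
		ret = fill_next_extent(virt, len);
		pthread_rwlock_rdlock(&alloc_sem);
	}
	pthread_rwlock_unlock(&alloc_sem);
	return ret;
}

int main(void)
{
	return walk_extents();
}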
@@ -393,6 +393,7 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
 	if (proc_alloc_inum(&dp->low_ino))
 		goto out_free_entry;
 
-	pde_set_flags(dp);
+	if (!S_ISDIR(dp->mode))
+		pde_set_flags(dp);
 
 	write_lock(&proc_subdir_lock);
@@ -18,23 +18,42 @@
 #define KASAN_ABI_VERSION 5
 
 /*
+ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
+ * dropping __has_feature support for sanitizers:
+ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
+ * Create these macros for older versions of clang so that it is easy to clean
+ * up once the minimum supported version of LLVM for building the kernel always
+ * creates these macros.
+ *
  * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
  * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
  * to avoid adding redundant attributes in other configurations.
  */
-
-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
 #define __SANITIZE_ADDRESS__
+#endif
+#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
+#define __SANITIZE_HWADDRESS__
+#endif
+#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
+#define __SANITIZE_THREAD__
+#endif
+
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef __SANITIZE_ADDRESS__
 #define __no_sanitize_address \
 		__attribute__((no_sanitize("address", "hwaddress")))
 #else
 #define __no_sanitize_address
 #endif
 
-#if __has_feature(thread_sanitizer)
-/* emulate gcc's __SANITIZE_THREAD__ flag */
-#define __SANITIZE_THREAD__
+#ifdef __SANITIZE_THREAD__
 #define __no_sanitize_thread \
 		__attribute__((no_sanitize("thread")))
 #else
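The guarded "feature test plus !defined()" shape keeps the header from redefining a macro that newer clang versions already provide natively. A tiny self-contained illustration of the same guard follows; MY_SANITIZER_MACRO is an invented name standing in for __SANITIZE_ADDRESS__ and friends, and the compiler built-ins used here are just stand-ins for __has_feature().

#include <stdio.h>

/*
 * Only define the marker ourselves when the compiler did not already provide
 * it, so compilers that ship the macro natively never see a redefinition.
 */
#if defined(__clang__) && !defined(MY_SANITIZER_MACRO)
#define MY_SANITIZER_MACRO 1
#endif
#if defined(__GNUC__) && !defined(MY_SANITIZER_MACRO)
#define MY_SANITIZER_MACRO 2
#endif

int main(void)
{
#ifdef MY_SANITIZER_MACRO
	printf("marker defined once, value %d\n", MY_SANITIZER_MACRO);
#else
	puts("marker not defined");
#endif
	return 0;
}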
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
 						       unsigned long size)
 { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 static inline void kasan_populate_early_vm_area_shadow(void *start,
 						       unsigned long size) { }
 static inline int kasan_populate_vmalloc(unsigned long start,
-					unsigned long size)
+					unsigned long size, gfp_t gfp_mask)
 {
 	return 0;
 }
@@ -956,6 +956,7 @@ void start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	mm_core_init();
+	maple_tree_init();
 	poking_init();
 	ftrace_init();
 
@@ -973,7 +974,6 @@ void start_kernel(void)
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
 	radix_tree_init();
-	maple_tree_init();
 
 	/*
 	 * Set up housekeeping before setting up workqueues to allow the unbound
@@ -2111,6 +2111,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
 		return;
 
+	/* First charge window */
+	if (!quota->total_charged_sz && !quota->charged_from)
+		quota->charged_from = jiffies;
+
 	/* New charge window starts */
 	if (time_after_eq(jiffies, quota->charged_from +
 				msecs_to_jiffies(quota->reset_interval))) {
@@ -198,6 +198,11 @@ static int damon_lru_sort_apply_parameters(void)
 	if (err)
 		return err;
 
+	if (!damon_lru_sort_mon_attrs.sample_interval) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
 	if (err)
 		goto out;
@@ -194,6 +194,11 @@ static int damon_reclaim_apply_parameters(void)
 	if (err)
 		return err;
 
+	if (!damon_reclaim_mon_attrs.aggr_interval) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = damon_set_attrs(param_ctx, &damon_reclaim_mon_attrs);
 	if (err)
 		goto out;
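Both DAMON hunks reject a zero interval before the value reaches code that divides by it; previously a poorly chosen insmod parameter of 0 could reach such a division and crash. Here is a small sketch of the same validate-before-divide idea with invented names (the real checks live in damon_lru_sort_apply_parameters() and damon_reclaim_apply_parameters(), and mon_attrs below is not the DAMON structure).

#include <stdio.h>
#include <errno.h>

struct mon_attrs {
	unsigned long sample_interval;	/* microseconds */
	unsigned long aggr_interval;	/* microseconds */
};

/* Returns 0 on success, -EINVAL when a later division would be by zero. */
static int apply_parameters(const struct mon_attrs *attrs)
{
	if (!attrs->sample_interval || !attrs->aggr_interval)
		return -EINVAL;

	/* Safe now: e.g. how many samples fit into one aggregation window. */
	unsigned long samples_per_aggr =
		attrs->aggr_interval / attrs->sample_interval;
	printf("samples per aggregation: %lu\n", samples_per_aggr);
	return 0;
}

int main(void)
{
	struct mon_attrs bad = { .sample_interval = 0, .aggr_interval = 100000 };
	struct mon_attrs good = { .sample_interval = 5000, .aggr_interval = 100000 };

	printf("bad:  %d\n", apply_parameters(&bad));	/* -EINVAL */
	printf("good: %d\n", apply_parameters(&good));
	return 0;
}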
@@ -1260,14 +1260,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 {
 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
 			struct damon_sysfs_kdamond, kobj);
-	struct damon_ctx *ctx = kdamond->damon_ctx;
-	bool running;
+	struct damon_ctx *ctx;
+	bool running = false;
 
-	if (!ctx)
-		running = false;
-	else
+	if (!mutex_trylock(&damon_sysfs_lock))
+		return -EBUSY;
+
+	ctx = kdamond->damon_ctx;
+	if (ctx)
 		running = damon_is_running(ctx);
 
+	mutex_unlock(&damon_sysfs_lock);
+
 	return sysfs_emit(buf, "%s\n", running ?
 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
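The state_show() fix stops dereferencing kdamond->damon_ctx before anything pins it: the pointer is re-read under damon_sysfs_lock, and mutex_trylock() lets the reader bail out with -EBUSY instead of sleeping. A loose user-space sketch of that shape follows; the lock, the guarded pointer, and the function names are all stand-ins for illustration, not the DAMON sysfs API.

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_ctx;			/* may be freed/replaced under the lock */

static int state_show(char *buf, size_t len)
{
	int running = 0;

	if (pthread_mutex_trylock(&state_lock))
		return -EBUSY;		/* don't block, just report busy */

	/* Only read the pointer while the lock pins it. */
	if (shared_ctx)
		running = *shared_ctx;

	pthread_mutex_unlock(&state_lock);
	return snprintf(buf, len, "%s\n", running ? "on" : "off");
}

int main(void)
{
	char buf[16];
	int on = 1;

	shared_ctx = &on;
	if (state_show(buf, sizeof(buf)) > 0)
		fputs(buf, stdout);
	return 0;
}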
@@ -5851,7 +5851,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
-	bool adjust_reservation = false;
+	bool adjust_reservation;
 	unsigned long last_addr_mask;
 	bool force_flush = false;
 
 | 
			
		|||
					sz);
 | 
			
		||||
		hugetlb_count_sub(pages_per_huge_page(h), mm);
 | 
			
		||||
		hugetlb_remove_rmap(folio);
 | 
			
		||||
		spin_unlock(ptl);
 | 
			
		||||
 | 
			
		||||
		/*
 | 
			
		||||
		 * Restore the reservation for anonymous page, otherwise the
 | 
			
		||||
| 
						 | 
				
			
			@ -5951,14 +5952,16 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 | 
			
		|||
		 * If there we are freeing a surplus, do not set the restore
 | 
			
		||||
		 * reservation bit.
 | 
			
		||||
		 */
 | 
			
		||||
		adjust_reservation = false;
 | 
			
		||||
 | 
			
		||||
		spin_lock_irq(&hugetlb_lock);
 | 
			
		||||
		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
 | 
			
		||||
		    folio_test_anon(folio)) {
 | 
			
		||||
			folio_set_hugetlb_restore_reserve(folio);
 | 
			
		||||
			/* Reservation to be adjusted after the spin lock */
 | 
			
		||||
			adjust_reservation = true;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		spin_unlock(ptl);
 | 
			
		||||
		spin_unlock_irq(&hugetlb_lock);
 | 
			
		||||
 | 
			
		||||
		/*
 | 
			
		||||
		 * Adjust the reservation for the region that will have the
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -336,13 +336,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
 	}
 }
 
-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
 {
 	unsigned long nr_populated, nr_total = nr_pages;
 	struct page **page_array = pages;
 
 	while (nr_pages) {
-		nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+		nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
 		if (!nr_populated) {
 			___free_pages_bulk(page_array, nr_total - nr_pages);
 			return -ENOMEM;
@@ -354,25 +354,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
 	return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
 	unsigned long nr_pages, nr_total = PFN_UP(end - start);
 	struct vmalloc_populate_data data;
+	unsigned int flags;
 	int ret = 0;
 
-	data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
 	if (!data.pages)
 		return -ENOMEM;
 
 	while (nr_total) {
 		nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
-		ret = ___alloc_pages_bulk(data.pages, nr_pages);
+		ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
 		if (ret)
 			break;
 
 		data.start = start;
+
+		/*
+		 * page tables allocations ignore external gfp mask, enforce it
+		 * by the scope API
+		 */
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			flags = memalloc_nofs_save();
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			flags = memalloc_noio_save();
+
 		ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
 					  kasan_populate_vmalloc_pte, &data);
+
+		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+			memalloc_nofs_restore(flags);
+		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+			memalloc_noio_restore(flags);
+
 		___free_pages_bulk(data.pages, nr_pages);
 		if (ret)
 			break;
@@ -386,7 +403,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
 	return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
@@ -415,7 +432,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
 
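kasan_populate_vmalloc() now threads the caller's gfp mask down, and because page-table allocations ignore that mask, the shadow-population hunk brackets apply_to_page_range() with memalloc_nofs_save()/memalloc_noio_save() depending on whether __GFP_FS or __GFP_IO is allowed. The following user-space sketch only illustrates that scope idea; the flag values and helper names are invented and are not the kernel's gfp or PF_MEMALLOC_* definitions.

#include <stdio.h>

/* Illustrative stand-ins, not the kernel's gfp bits. */
#define ALLOW_IO (1u << 0)
#define ALLOW_FS (1u << 1)

static unsigned int denied_scope;	/* a per-task flag word in the kernel */

static unsigned int scope_save(unsigned int deny)
{
	unsigned int old = denied_scope;

	denied_scope |= deny;		/* nested allocations see the limit */
	return old;
}

static void scope_restore(unsigned int old)
{
	denied_scope = old;
}

static void nested_page_table_alloc(void)
{
	printf("nested alloc may use IO:%s FS:%s\n",
	       (denied_scope & ALLOW_IO) ? "no" : "yes",
	       (denied_scope & ALLOW_FS) ? "no" : "yes");
}

int main(void)
{
	unsigned int gfp_mask = ALLOW_IO;	/* caller permits IO, not FS */
	unsigned int old = denied_scope;

	/* Same decision as the hunk above: pick the scope from the mask. */
	if ((gfp_mask & (ALLOW_FS | ALLOW_IO)) == ALLOW_IO)
		old = scope_save(ALLOW_FS);		/* ~memalloc_nofs_save() */
	else if ((gfp_mask & (ALLOW_FS | ALLOW_IO)) == 0)
		old = scope_save(ALLOW_FS | ALLOW_IO);	/* ~memalloc_noio_save() */

	nested_page_table_alloc();
	scope_restore(old);
	return 0;
}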
@@ -1417,8 +1417,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 */
 		if (cc->is_khugepaged &&
 		    (pte_young(pteval) || folio_test_young(folio) ||
-		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
-								     address)))
+		     folio_test_referenced(folio) ||
+		     mmu_notifier_test_young(vma->vm_mm, _address)))
 			referenced++;
 	}
 	if (!writable) {
@@ -956,7 +956,7 @@ static const char * const action_page_types[] = {
 	[MF_MSG_BUDDY]			= "free buddy page",
 	[MF_MSG_DAX]			= "dax page",
 	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
-	[MF_MSG_ALREADY_POISONED]	= "already poisoned",
+	[MF_MSG_ALREADY_POISONED]	= "already poisoned page",
 	[MF_MSG_UNKNOWN]		= "unknown page",
 };
 
 | 
			
		|||
{
 | 
			
		||||
	trace_memory_failure_event(pfn, type, result);
 | 
			
		||||
 | 
			
		||||
	if (type != MF_MSG_ALREADY_POISONED) {
 | 
			
		||||
		num_poisoned_pages_inc(pfn);
 | 
			
		||||
 | 
			
		||||
		update_per_node_mf_stats(pfn, result);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	pr_err("%#lx: recovery action for %s: %s\n",
 | 
			
		||||
		pfn, action_page_types[type], action_name[result]);
 | 
			
		||||
| 
						 | 
				
			
@@ -2094,12 +2095,11 @@ retry:
 		*hugetlb = 0;
 		return 0;
 	} else if (res == -EHWPOISON) {
-		pr_err("%#lx: already hardware poisoned\n", pfn);
 		if (flags & MF_ACTION_REQUIRED) {
 			folio = page_folio(p);
 			res = kill_accessing_process(current, folio_pfn(folio), flags);
+			action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
 		}
-		action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
 		return res;
 	} else if (res == -EBUSY) {
 		if (!(flags & MF_NO_RETRY)) {
@@ -2285,7 +2285,6 @@ try_again:
 		goto unlock_mutex;
 
 	if (TestSetPageHWPoison(p)) {
-		pr_err("%#lx: already hardware poisoned\n", pfn);
 		res = -EHWPOISON;
 		if (flags & MF_ACTION_REQUIRED)
 			res = kill_accessing_process(current, pfn, flags);
@@ -2569,10 +2568,9 @@ int unpoison_memory(unsigned long pfn)
 	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
 					DEFAULT_RATELIMIT_BURST);
 
-	if (!pfn_valid(pfn))
-		return -ENXIO;
-
-	p = pfn_to_page(pfn);
+	p = pfn_to_online_page(pfn);
+	if (!p)
+		return -EIO;
 	folio = page_folio(p);
 
 	mutex_lock(&mf_mutex);
@@ -1815,8 +1815,14 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
 
 		if (folio_contain_hwpoisoned_page(folio)) {
-			if (WARN_ON(folio_test_lru(folio)))
-				folio_isolate_lru(folio);
+			/*
+			 * unmap_poisoned_folio() cannot handle large folios
+			 * in all cases yet.
+			 */
+			if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+				goto put_folio;
+			if (folio_test_lru(folio) && !folio_isolate_lru(folio))
+				goto put_folio;
 			if (folio_mapped(folio)) {
 				folio_lock(folio);
 				unmap_poisoned_folio(folio, pfn, false);
@@ -1774,15 +1774,18 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
 	if (!vrm->new_len)
 		return -EINVAL;
 
-	/* Is the new length or address silly? */
-	if (vrm->new_len > TASK_SIZE ||
-	    vrm->new_addr > TASK_SIZE - vrm->new_len)
+	/* Is the new length silly? */
+	if (vrm->new_len > TASK_SIZE)
 		return -EINVAL;
 
 	/* Remainder of checks are for cases with specific new_addr. */
 	if (!vrm_implies_new_addr(vrm))
 		return 0;
 
+	/* Is the new address silly? */
+	if (vrm->new_addr > TASK_SIZE - vrm->new_len)
+		return -EINVAL;
+
 	/* The new address must be page-aligned. */
 	if (offset_in_page(vrm->new_addr))
 		return -EINVAL;
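The mremap regression fix splits the old combined check: new_len is validated unconditionally, but new_addr is only meaningful when the caller actually requested a fixed destination, so checking it for every call rejected valid requests. A compact sketch of the same ordering with simplified, invented types follows; TASK_LIMIT and REQ_FIXED_ADDR are placeholders, not the kernel's TASK_SIZE or MREMAP_* flags.

#include <stdio.h>
#include <errno.h>

#define TASK_LIMIT	(1UL << 47)	/* placeholder address-space limit */
#define REQ_FIXED_ADDR	(1u << 0)	/* caller supplied a target address */

static int check_remap_params(unsigned long new_len, unsigned long new_addr,
			      unsigned int flags)
{
	if (!new_len || new_len > TASK_LIMIT)
		return -EINVAL;

	/* Remaining checks only apply when a specific address was requested. */
	if (!(flags & REQ_FIXED_ADDR))
		return 0;

	if (new_addr > TASK_LIMIT - new_len)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* Without REQ_FIXED_ADDR, a stale new_addr must not matter. */
	printf("%d\n", check_remap_params(4096, ~0UL, 0));		/* 0 */
	printf("%d\n", check_remap_params(4096, ~0UL, REQ_FIXED_ADDR));	/* -EINVAL */
	return 0;
}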
 mm/percpu.c | 10
@@ -1734,7 +1734,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
 	bool is_atomic;
 	bool do_warn;
 	struct obj_cgroup *objcg = NULL;
-	static int warn_limit = 10;
+	static atomic_t warn_limit = ATOMIC_INIT(10);
 	struct pcpu_chunk *chunk, *next;
 	const char *err;
 	int slot, off, cpu, ret;
@@ -1904,14 +1904,18 @@ fail_unlock:
 fail:
 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
 
-	if (do_warn && warn_limit) {
-		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
-			size, align, is_atomic, err);
-		if (!is_atomic)
-			dump_stack();
-		if (!--warn_limit)
-			pr_info("limit reached, disable warning\n");
+	if (do_warn) {
+		int remaining = atomic_dec_if_positive(&warn_limit);
+
+		if (remaining >= 0) {
+			pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
+				size, align, is_atomic, err);
+			if (!is_atomic)
+				dump_stack();
+			if (remaining == 0)
+				pr_info("limit reached, disable warning\n");
+		}
 	}
 
 	if (is_atomic) {
 		/* see the flag handling in pcpu_balance_workfn() */
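The percpu fix replaces a plain "static int" decrement, which could race on concurrent allocation failures and print more or fewer warnings than intended, with atomic_dec_if_positive(), so the budget of ten warnings is consumed exactly once each and the "limit reached" note prints once. A user-space approximation with C11 atomics follows; dec_if_positive() below is a hand-rolled stand-in that mimics the kernel helper's return convention (the new value, or a negative number once the budget is gone).

#include <stdatomic.h>
#include <stdio.h>

static atomic_int warn_limit = 3;	/* the kernel uses 10; small for the demo */

/* Decrement only while the counter is still positive; return the new value,
 * or -1 when the budget was already exhausted. */
static int dec_if_positive(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur > 0) {
		if (atomic_compare_exchange_weak(v, &cur, cur - 1))
			return cur - 1;
	}
	return -1;
}

static void alloc_failed(int id)
{
	int remaining = dec_if_positive(&warn_limit);

	if (remaining >= 0) {
		printf("allocation %d failed\n", id);
		if (remaining == 0)
			printf("limit reached, disable warning\n");
	}
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		alloc_failed(i);	/* only the first three warn */
	return 0;
}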
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!vmap_initialized))
 		return ERR_PTR(-EBUSY);
 
+	/* Only reclaim behaviour flags are relevant. */
+	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
 	might_sleep();
 
 	/*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	 */
 	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
 	if (!va) {
-		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 		if (unlikely(!va))
 			return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ retry:
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
-	ret = kasan_populate_vmalloc(addr, size);
+	ret = kasan_populate_vmalloc(addr, size, gfp_mask);
 	if (ret) {
 		free_vmap_area(va);
 		return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ retry:
 
 	/* populate the kasan shadow space */
 	for (area = 0; area < nr_vms; area++) {
-		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
 			goto err_free_shadow;
 	}
 