Merge branch 'akpm' (Fixes from Andrew)

Merge misc fixes from Andrew Morton:
 "8 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (8 patches)
  futex: avoid wake_futex() for a PI futex_q
  watchdog: using u64 in get_sample_period()
  writeback: put unused inodes to LRU after writeback completion
  mm: vmscan: check for fatal signals iff the process was throttled
  Revert "mm: remove __GFP_NO_KSWAPD"
  proc: check vma->vm_file before dereferencing
  UAPI: strip the _UAPI prefix from header guards during header installation
  include/linux/bug.h: fix sparse warning related to BUILD_BUG_ON_INVALID
commit 2844a48706

13 changed files with 83 additions and 23 deletions
drivers/mtd/mtdcore.c

@@ -1077,7 +1077,8 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying.
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1091,7 +1092,8 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+		       __GFP_NORETRY | __GFP_NO_KSWAPD;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
 
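The comment in the first hunk describes a "start big, shrink on failure" allocation strategy. Below is a hedged sketch of that loop using the flag set from the second hunk; it mirrors what mtd_kmalloc_up_to() does, with the MTD-specific minimum folded into a min_alloc parameter (the function name is illustrative, not from this commit).

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch only: allocate up to *size bytes, halving toward min_alloc on
 * failure.  The large attempts use the diff's flag set so the allocator
 * does not retry, swap, write back, perform I/O or wake kswapd; only the
 * final, smallest attempt falls back to a regular GFP_KERNEL allocation. */
static void *alloc_up_to_sketch(size_t *size, size_t min_alloc)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;				/* halve and retry */
		*size = ALIGN(*size, min_alloc);	/* keep the result aligned */
	}

	return kmalloc(*size, GFP_KERNEL);		/* last resort */
}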
fs/fs-writeback.c

@@ -228,6 +228,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 static void inode_sync_complete(struct inode *inode)
 {
 	inode->i_state &= ~I_SYNC;
+	/* If inode is clean an unused, put it into LRU now... */
+	inode_add_lru(inode);
 	/* Waiters must see I_SYNC cleared before being woken up */
 	smp_mb();
 	wake_up_bit(&inode->i_state, __I_SYNC);
fs/inode.c (16 lines changed)

@@ -408,6 +408,19 @@ static void inode_lru_list_add(struct inode *inode)
 	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
+/*
+ * Add inode to LRU if needed (inode is unused and clean).
+ *
+ * Needs inode->i_lock held.
+ */
+void inode_add_lru(struct inode *inode)
+{
+	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
+	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+		inode_lru_list_add(inode);
+}
+
+
 static void inode_lru_list_del(struct inode *inode)
 {
 	spin_lock(&inode->i_sb->s_inode_lru_lock);
@@ -1390,8 +1403,7 @@ static void iput_final(struct inode *inode)
 
 	if (!drop && (sb->s_flags & MS_ACTIVE)) {
 		inode->i_state |= I_REFERENCED;
-		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
-			inode_lru_list_add(inode);
+		inode_add_lru(inode);
 		spin_unlock(&inode->i_lock);
 		return;
 	}
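The new helper's contract (see the comment in the hunk) is that inode->i_lock is held; both call sites added by this merge satisfy it. A minimal hedged sketch of a conforming caller follows (illustrative only; inode_add_lru() is fs-internal and only declared in fs/internal.h, shown next).

/* Sketch: take i_lock around the call, as iput_final() in the hunk above
 * does.  inode_add_lru() itself re-checks dirty/sync/freeing state, i_count
 * and MS_ACTIVE before touching the per-sb LRU list. */
static void maybe_put_inode_on_lru(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_add_lru(inode);
	spin_unlock(&inode->i_lock);
}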
fs/internal.h

@@ -110,6 +110,7 @@ extern int open_check_o_direct(struct file *f);
  * inode.c
  */
 extern spinlock_t inode_sb_list_lock;
+extern void inode_add_lru(struct inode *inode);
 
 /*
  * fs-writeback.c
fs/proc/base.c

@@ -1877,8 +1877,9 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
 	if (!vma)
 		goto out_no_vma;
 
-	result = proc_map_files_instantiate(dir, dentry, task,
-			(void *)(unsigned long)vma->vm_file->f_mode);
+	if (vma->vm_file)
+		result = proc_map_files_instantiate(dir, dentry, task,
+				(void *)(unsigned long)vma->vm_file->f_mode);
 
 out_no_vma:
 	up_read(&mm->mmap_sem);
include/linux/bug.h

@@ -15,6 +15,7 @@ struct pt_regs;
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)
 #define BUILD_BUG_ON_ZERO(e) (0)
 #define BUILD_BUG_ON_NULL(e) ((void*)0)
+#define BUILD_BUG_ON_INVALID(e) (0)
 #define BUILD_BUG_ON(condition)
 #define BUILD_BUG() (0)
 #else /* __CHECKER__ */
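This hunk sits in the #ifdef __CHECKER__ branch, i.e. the definitions sparse sees. Without the stub, any macro expanding to BUILD_BUG_ON_INVALID() makes sparse warn even though gcc builds are fine. A hedged illustration of the kind of user affected (MY_DEBUG_CHECK is hypothetical; in-tree, VM_BUG_ON() expands this way when CONFIG_DEBUG_VM is off):

/* Illustration only: with the debug option disabled, the check macro still
 * parses and type-checks its condition but generates no code; the stub
 * above keeps sparse quiet for such expansions. */
#define MY_DEBUG_CHECK(cond)	BUILD_BUG_ON_INVALID(cond)

static void sketch(struct inode *inode)
{
	MY_DEBUG_CHECK(inode->i_state & I_DIRTY);	/* no runtime cost */
}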
include/linux/gfp.h

@@ -31,6 +31,7 @@ struct vm_area_struct;
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
 #define ___GFP_NOTRACK		0x200000u
+#define ___GFP_NO_KSWAPD	0x400000u
 #define ___GFP_OTHER_NODE	0x800000u
 #define ___GFP_WRITE		0x1000000u
 
@@ -85,6 +86,7 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
+#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
 
@@ -114,7 +116,8 @@ struct vm_area_struct;
 				 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+			 __GFP_NO_KSWAPD)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
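With the flag restored, GFP_TRANSHUGE allocations opt out of waking kswapd again, and the slow path in mm/page_alloc.c (hunks further down) honors that. A hedged caller-side sketch, not taken from this commit (HPAGE_PMD_ORDER is the usual THP order; the fallback policy here is illustrative):

#include <linux/gfp.h>
#include <linux/huge_mm.h>

/* Sketch: try a huge allocation that will neither wake kswapd nor retry
 * (GFP_TRANSHUGE carries __GFP_NO_KSWAPD, __GFP_NORETRY and __GFP_NOWARN),
 * then fall back to a single base page if it fails. */
static struct page *alloc_huge_or_fallback(void)
{
	struct page *page;

	page = alloc_pages(GFP_TRANSHUGE, HPAGE_PMD_ORDER);
	if (!page)
		page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
	return page;
}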
include/trace/events/gfpflags.h

@@ -36,6 +36,7 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
+	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"
 
kernel/futex.c

@@ -843,6 +843,9 @@ static void wake_futex(struct futex_q *q)
 {
 	struct task_struct *p = q->task;
 
+	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
+		return;
+
 	/*
 	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
 	 * a non-futex wake up happens on another CPU then the task
@@ -1078,6 +1081,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
 
 	plist_for_each_entry_safe(this, next, head, list) {
 		if (match_futex (&this->key, &key1)) {
+			if (this->pi_state || this->rt_waiter) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
 			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
@@ -1090,6 +1097,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
 		op_ret = 0;
 		plist_for_each_entry_safe(this, next, head, list) {
 			if (match_futex (&this->key, &key2)) {
+				if (this->pi_state || this->rt_waiter) {
+					ret = -EINVAL;
+					goto out_unlock;
+				}
 				wake_futex(this);
 				if (++op_ret >= nr_wake2)
 					break;
@@ -1098,6 +1109,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
 		ret += op_ret;
 	}
 
+out_unlock:
 	double_unlock_hb(hb1, hb2);
 out_put_keys:
 	put_futex_key(&key2);
@@ -1387,9 +1399,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 		/*
 		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
 		 * be paired with each other and no other futex ops.
+		 *
+		 * We should never be requeueing a futex_q with a pi_state,
+		 * which is awaiting a futex_unlock_pi().
 		 */
 		if ((requeue_pi && !this->rt_waiter) ||
-		    (!requeue_pi && this->rt_waiter)) {
+		    (!requeue_pi && this->rt_waiter) ||
+		    this->pi_state) {
 			ret = -EINVAL;
 			break;
 		}
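For orientation, the case these checks close off is a waiter queued with the PI-aware requeue ops being hit by a plain, non-PI wake. A hedged userspace sketch of that mismatch (raw syscall wrapper, illustrative variable names; with this change the kernel warns and skips the wake in wake_futex(), or returns -EINVAL from futex_wake_op()/futex_requeue(), instead of corrupting PI state):

#include <linux/futex.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t f1, f2;		/* illustrative futex words */

static long sys_futex(uint32_t *uaddr, int op, uint32_t val,
		      void *timeout, uint32_t *uaddr2, uint32_t val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}

static void waiter(void)
{
	/* Park on f1, expecting to be moved to the PI futex f2 by a later
	 * FUTEX_CMP_REQUEUE_PI; this waiter carries an rt_waiter. */
	sys_futex(&f1, FUTEX_WAIT_REQUEUE_PI, 0, NULL, &f2, 0);
}

static void buggy_waker(void)
{
	/* Wrong pairing: a plain wake on f1 can reach a PI-requeue waiter.
	 * Previously this could end up in wake_futex() with rt_waiter set;
	 * now the kernel refuses (WARN / -EINVAL). */
	sys_futex(&f1, FUTEX_WAKE, 1, NULL, NULL, 0);
}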
kernel/watchdog.c

@@ -116,7 +116,7 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-static unsigned long get_sample_period(void)
+static u64 get_sample_period(void)
 {
 	/*
 	 * convert watchdog_thresh from seconds to ns
@@ -125,7 +125,7 @@ static unsigned long get_sample_period(void)
 	 * and hard thresholds) to increment before the
 	 * hardlockup detector generates a warning
 	 */
-	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
+	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
 }
 
 /* Commands for resetting the watchdog */
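The overflow being fixed is easy to see with the numbers involved: get_softlockup_thresh() is 2 * watchdog_thresh and NSEC_PER_SEC / 5 is 200,000,000, so any watchdog_thresh above 10 seconds overflows a 32-bit unsigned long. A standalone illustration (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long nsec_per_sec = 1000000000UL;
	unsigned long thresh = 2 * 20;	/* watchdog_thresh raised to 20s */

	/* On 32-bit, unsigned long is 32 bits: 40 * 200,000,000 = 8e9 wraps. */
	unsigned long bad = thresh * (nsec_per_sec / 5);

	/* Doing the multiply in 64 bits, as the patch does via the (u64)
	 * cast and the u64 return type, keeps the full value. */
	unsigned long long good = thresh * ((unsigned long long)nsec_per_sec / 5);

	printf("32-bit result: %lu\n", bad);	/* 3705032704 on a 32-bit build */
	printf("64-bit result: %llu\n", good);	/* 8000000000 */
	return 0;
}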
mm/page_alloc.c

@@ -2416,8 +2416,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx,
-					zone_idx(preferred_zone));
+	if (!(gfp_mask & __GFP_NO_KSWAPD))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+						zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2494,7 +2495,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * system then fail the allocation instead of entering direct reclaim.
 	 */
 	if ((deferred_compaction || contended_compaction) &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
mm/vmscan.c (37 lines changed)

@@ -2207,9 +2207,12 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
  * Throttle direct reclaimers if backing storage is backed by the network
  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
  * depleted. kswapd will continue to make progress and wake the processes
- * when the low watermark is reached
+ * when the low watermark is reached.
+ *
+ * Returns true if a fatal signal was delivered during throttling. If this
+ * happens, the page allocator should not consider triggering the OOM killer.
  */
-static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 					nodemask_t *nodemask)
 {
 	struct zone *zone;
@@ -2224,13 +2227,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	 * processes to block on log_wait_commit().
 	 */
 	if (current->flags & PF_KTHREAD)
-		return;
+		goto out;
+
+	/*
+	 * If a fatal signal is pending, this process should not throttle.
+	 * It should return quickly so it can exit and free its memory
+	 */
+	if (fatal_signal_pending(current))
+		goto out;
 
 	/* Check if the pfmemalloc reserves are ok */
 	first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
 	pgdat = zone->zone_pgdat;
 	if (pfmemalloc_watermark_ok(pgdat))
-		return;
+		goto out;
 
 	/* Account for the throttling */
 	count_vm_event(PGSCAN_DIRECT_THROTTLE);
@@ -2246,12 +2256,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	if (!(gfp_mask & __GFP_FS)) {
 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
 			pfmemalloc_watermark_ok(pgdat), HZ);
-		return;
+
+		goto check_pending;
 	}
 
 	/* Throttle until kswapd wakes the process */
 	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
 		pfmemalloc_watermark_ok(pgdat));
+
+check_pending:
+	if (fatal_signal_pending(current))
+		return true;
+
+out:
+	return false;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -2273,13 +2291,12 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
-
 	/*
-	 * Do not enter reclaim if fatal signal is pending. 1 is returned so
-	 * that the page allocator does not consider triggering OOM
+	 * Do not enter reclaim if fatal signal was delivered while throttled.
+	 * 1 is returned so that the page allocator does not OOM kill at this
+	 * point.
 	 */
-	if (fatal_signal_pending(current))
+	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
 		return 1;
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
scripts/headers_install.pl

@@ -42,6 +42,9 @@ foreach my $filename (@files) {
 		$line =~ s/(^|\s)(inline)\b/$1__$2__/g;
 		$line =~ s/(^|\s)(asm)\b(\s|[(]|$)/$1__$2__$3/g;
 		$line =~ s/(^|\s|[(])(volatile)\b(\s|[(]|$)/$1__$2__$3/g;
+		$line =~ s/#ifndef _UAPI/#ifndef /;
+		$line =~ s/#define _UAPI/#define /;
+		$line =~ s!#endif /[*] _UAPI!#endif /* !;
 		printf {$out} "%s", $line;
 	}
 	close $out;
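The effect of the three added substitutions on an exported header's include guard, shown on a hypothetical header (linux/foo.h and its guard name are made up; the pattern matches what the UAPI split generates):

/* In-tree UAPI header, e.g. include/uapi/linux/foo.h (hypothetical): */
#ifndef _UAPI_LINUX_FOO_H
#define _UAPI_LINUX_FOO_H
/* ... exported definitions ... */
#endif /* _UAPI_LINUX_FOO_H */

/* The same file after `make headers_install` has run headers_install.pl:
 * the _UAPI prefix is stripped, so the installed header keeps the guard
 * name it had before the UAPI split and userspace that tests or defines
 * that guard keeps working. */
#ifndef _LINUX_FOO_H
#define _LINUX_FOO_H
/* ... exported definitions ... */
#endif /* _LINUX_FOO_H */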