	fs/proc/task_mmu: stop using linked list and highest_vm_end
Remove references to the mm_struct linked list and highest_vm_end in
preparation for their removal.

Link: https://lkml.kernel.org/r/20220906194824.2110408-44-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit c4c84f0628
parent 5f14b9246e

2 changed files with 42 additions and 33 deletions
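Every hunk below applies the same substitution: mm->mmap/vm_next linked-list
walks become maple-tree iteration, and the highest_vm_end cache becomes an
unbounded walk. As orientation, a minimal sketch of the two idioms, using only
helper names that appear in the hunks themselves (kernel-internal API):

	/* Old idiom: walk the mm_struct linked list (being removed). */
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		/* ... per-VMA work ... */;

	/* New idiom: iterate the VMA maple tree directly. */
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	mas_for_each(&mas, vma, ULONG_MAX)
		/* ... per-VMA work ... */;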
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -285,7 +285,7 @@ struct proc_maps_private {
 	struct task_struct *task;
 	struct mm_struct *mm;
 #ifdef CONFIG_MMU
-	struct vm_area_struct *tail_vma;
+	struct vma_iterator iter;
 #endif
#ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
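Embedding a vma_iterator in proc_maps_private replaces the cached tail_vma
pointer with a resumable tree cursor. For reference, vma_iterator is a thin
wrapper over the maple-tree state (sketch of the contemporaneous
linux/mm_types.h definition, quoted from memory, so treat as approximate):

	struct vma_iterator {
		struct ma_state mas;
	};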
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -123,12 +123,26 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 }
 #endif
 
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+						loff_t *ppos)
+{
+	struct vm_area_struct *vma = vma_next(&priv->iter);
+
+	if (vma) {
+		*ppos = vma->vm_start;
+	} else {
+		*ppos = -2UL;
+		vma = get_gate_vma(priv->mm);
+	}
+
+	return vma;
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
 	unsigned long last_addr = *ppos;
 	struct mm_struct *mm;
-	struct vm_area_struct *vma;
 
 	/* See m_next(). Zero at the start or after lseek. */
 	if (last_addr == -1UL)
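proc_get_vma() drives the walk through vma_next(), which returns successive
VMAs from the iterator and NULL past the last one; only then is the gate VMA
substituted and *ppos set to the -2UL sentinel. vma_next() is roughly the
following inline from linux/mm.h of this series (quoted from memory, so
approximate):

	static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
	{
		/*
		 * Uses mas_find() so the first call returns the first VMA
		 * rather than skipping past it.
		 */
		return mas_find(&vmi->mas, ULONG_MAX);
	}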
@@ -152,31 +166,21 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 		return ERR_PTR(-EINTR);
 	}
 
+	vma_iter_init(&priv->iter, mm, last_addr);
 	hold_task_mempolicy(priv);
-	priv->tail_vma = get_gate_vma(mm);
+	if (last_addr == -2UL)
+		return get_gate_vma(mm);
 
-	vma = find_vma(mm, last_addr);
-	if (vma)
-		return vma;
-
-	return priv->tail_vma;
+	return proc_get_vma(priv, ppos);
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 {
-	struct proc_maps_private *priv = m->private;
-	struct vm_area_struct *next, *vma = v;
-
-	if (vma == priv->tail_vma)
-		next = NULL;
-	else if (vma->vm_next)
-		next = vma->vm_next;
-	else
-		next = priv->tail_vma;
-
-	*ppos = next ? next->vm_start : -1UL;
-
-	return next;
+	if (*ppos == -2UL) {
+		*ppos = -1UL;
+		return NULL;
+	}
+	return proc_get_vma(m->private, ppos);
 }
 
 static void m_stop(struct seq_file *m, void *v)
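All cursor state now lives in *ppos and priv->iter; tail_vma is gone. The
position protocol implied by the two functions above, restated as a comment
(sketch; -1UL was already the end-of-walk sentinel before this patch):

	/*
	 * *ppos as used by m_start()/m_next():
	 *   an address : vma_iter_init(&priv->iter, mm, addr) resumes there
	 *   -2UL       : all tree VMAs emitted, the gate VMA is the current
	 *                entry, so the next m_next() call ends the walk
	 *   -1UL       : walk finished; nothing more is returned
	 */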
@@ -876,16 +880,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
 	struct proc_maps_private *priv = m->private;
 	struct mem_size_stats mss;
-	struct mm_struct *mm;
+	struct mm_struct *mm = priv->mm;
 	struct vm_area_struct *vma;
-	unsigned long last_vma_end = 0;
+	unsigned long vma_start = 0, last_vma_end = 0;
 	int ret = 0;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	priv->task = get_proc_task(priv->inode);
 	if (!priv->task)
 		return -ESRCH;
 
-	mm = priv->mm;
 	if (!mm || !mmget_not_zero(mm)) {
 		ret = -ESRCH;
 		goto out_put_task;
@@ -898,8 +902,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		goto out_put_mm;
 
 	hold_task_mempolicy(priv);
+	vma = mas_find(&mas, 0);
 
-	for (vma = priv->mm->mmap; vma;) {
+	if (unlikely(!vma))
+		goto empty_set;
+
+	vma_start = vma->vm_start;
+	do {
 		smap_gather_stats(vma, &mss, 0);
 		last_vma_end = vma->vm_end;
 
@@ -908,6 +917,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		 * access it for write request.
 		 */
 		if (mmap_lock_is_contended(mm)) {
+			mas_pause(&mas);
 			mmap_read_unlock(mm);
 			ret = mmap_read_lock_killable(mm);
 			if (ret) {
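mas_pause() is what makes dropping mmap_lock mid-walk safe: it invalidates the
cached tree position so the next lookup re-walks from the saved index instead
of trusting nodes that a writer may have freed. The general shape of the
pattern, as a hedged sketch (walk_with_backoff is a hypothetical name; the
tree is assumed to be protected by mmap_lock as in the hunk above):

	#include <linux/mm.h>
	#include <linux/mmap_lock.h>

	static void walk_with_backoff(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		MA_STATE(mas, &mm->mm_mt, 0, 0);

		mmap_read_lock(mm);
		mas_for_each(&mas, vma, ULONG_MAX) {
			/* ... per-VMA work ... */
			if (mmap_lock_is_contended(mm)) {
				mas_pause(&mas);	/* drop cached tree position */
				mmap_read_unlock(mm);	/* give writers a chance */
				mmap_read_lock(mm);
				/* next lookup re-walks from the saved index */
			}
		}
		mmap_read_unlock(mm);
	}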
@@ -951,7 +961,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			 *    contains last_vma_end.
 			 *    Iterate VMA' from last_vma_end.
 			 */
-			vma = find_vma(mm, last_vma_end - 1);
+			vma = mas_find(&mas, ULONG_MAX);
 			/* Case 3 above */
 			if (!vma)
 				break;
@@ -965,11 +975,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 				smap_gather_stats(vma, &mss, last_vma_end);
 		}
 		/* Case 2 above */
-		vma = vma->vm_next;
-	}
+	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 
-	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
-			       last_vma_end, 0, 0, 0, 0);
+empty_set:
+	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 	seq_pad(m, ' ');
 	seq_puts(m, "[rollup]\n");
 
@@ -1262,6 +1271,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		MA_STATE(mas, &mm->mm_mt, 0, 0);
 		struct mmu_notifier_range range;
 		struct clear_refs_private cp = {
 			.type = type,
@@ -1281,7 +1291,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		}
 
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			mas_for_each(&mas, vma, ULONG_MAX) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
 				vma->vm_flags &= ~VM_SOFTDIRTY;
@@ -1293,8 +1303,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 					0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
-		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
-				&cp);
+		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			mmu_notifier_invalidate_range_end(&range);
 			flush_tlb_mm(mm);
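With highest_vm_end gone there is no cached "end of the last VMA" to bound
the walk, so clear_refs_write() simply passes -1 (that is, ULONG_MAX) as the
end address; walk_page_range() only visits addresses actually covered by
VMAs, so the wider bound changes no behaviour. An equivalent, more explicit
spelling of the new call (sketch):

	walk_page_range(mm, 0, ULONG_MAX, &clear_refs_walk_ops, &cp);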