mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)

Merge branch 'linus' into locking/core, to resolve conflicts

Conflicts:
	include/linux/mm_types.h
	mm/huge_memory.c

I removed the smp_mb__before_spinlock() like the following commit does:

  8b1b436dd1 ("mm, locking: Rework {set,clear,mm}_tlb_flush_pending()")

and fixed up the affected commits.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

This commit is contained in commit 040cca3ab2.

81 changed files with 678 additions and 362 deletions
@@ -14004,6 +14004,7 @@ F:	drivers/block/virtio_blk.c
F:	include/linux/virtio*.h
F:	include/uapi/linux/virtio_*.h
F:	drivers/crypto/virtio/
F:	mm/balloon_compaction.c

VIRTIO CRYPTO DRIVER
M:	Gonglei <arei.gonglei@huawei.com>
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->range_start = start;
		tlb->range_end = end;
	}

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)


static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 * collected.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
@@ -47,10 +47,9 @@ struct mmu_table_batch {
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

static inline void tlb_gather_mmu(struct mmu_gather *tlb,
				  struct mm_struct *mm,
				  unsigned long start,
				  unsigned long end)
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
	tlb_flush_mmu_free(tlb);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
	}

	tlb_flush_mmu(tlb);
}

@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (tlb->fullmm)
	if (tlb->fullmm || force)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
@@ -47,10 +47,26 @@
#define SUN4V_CHIP_NIAGARA5	0x05
#define SUN4V_CHIP_SPARC_M6	0x06
#define SUN4V_CHIP_SPARC_M7	0x07
#define SUN4V_CHIP_SPARC_M8	0x08
#define SUN4V_CHIP_SPARC64X	0x8a
#define SUN4V_CHIP_SPARC_SN	0x8b
#define SUN4V_CHIP_UNKNOWN	0xff

/*
 * The following CPU_ID_xxx constants are used
 * to identify the CPU type in the setup phase
 * (see head_64.S)
 */
#define CPU_ID_NIAGARA1		('1')
#define CPU_ID_NIAGARA2		('2')
#define CPU_ID_NIAGARA3		('3')
#define CPU_ID_NIAGARA4		('4')
#define CPU_ID_NIAGARA5		('5')
#define CPU_ID_M6		('6')
#define CPU_ID_M7		('7')
#define CPU_ID_M8		('8')
#define CPU_ID_SONOMA1		('N')

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
		sparc_pmu_type = "sparc-m7";
		break;

	case SUN4V_CHIP_SPARC_M8:
		sparc_cpu_type = "SPARC-M8";
		sparc_fpu_type = "SPARC-M8 integrated FPU";
		sparc_pmu_type = "sparc-m8";
		break;

	case SUN4V_CHIP_SPARC_SN:
		sparc_cpu_type = "SPARC-SN";
		sparc_fpu_type = "SPARC-SN integrated FPU";
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
	case SUN4V_CHIP_NIAGARA5:
	case SUN4V_CHIP_SPARC_M6:
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
	case SUN4V_CHIP_SPARC64X:
		rover_inc_table = niagara_iterate_method;
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
	 nop

70:	ldub	[%g1 + 7], %g2
	cmp	%g2, '3'
	cmp	%g2, CPU_ID_NIAGARA3
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_NIAGARA3, %g4
	cmp	%g2, '4'
	cmp	%g2, CPU_ID_NIAGARA4
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_NIAGARA4, %g4
	cmp	%g2, '5'
	cmp	%g2, CPU_ID_NIAGARA5
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_NIAGARA5, %g4
	cmp	%g2, '6'
	cmp	%g2, CPU_ID_M6
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_SPARC_M6, %g4
	cmp	%g2, '7'
	cmp	%g2, CPU_ID_M7
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_SPARC_M7, %g4
	cmp	%g2, 'N'
	cmp	%g2, CPU_ID_M8
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_SPARC_M8, %g4
	cmp	%g2, CPU_ID_SONOMA1
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_SPARC_SN, %g4
	ba,pt	%xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
91:	sethi	%hi(prom_cpu_compatible), %g1
	or	%g1, %lo(prom_cpu_compatible), %g1
	ldub	[%g1 + 17], %g2
	cmp	%g2, '1'
	cmp	%g2, CPU_ID_NIAGARA1
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_NIAGARA1, %g4
	cmp	%g2, '2'
	cmp	%g2, CPU_ID_NIAGARA2
	be,pt	%xcc, 5f
	 mov	SUN4V_CHIP_NIAGARA2, %g4

@@ -600,6 +603,9 @@ niagara_tlb_fixup:
	be,pt	%xcc, niagara4_patch
	 nop
	cmp	%g1, SUN4V_CHIP_SPARC_M7
	be,pt	%xcc, niagara4_patch
	 nop
	cmp	%g1, SUN4V_CHIP_SPARC_M8
	be,pt	%xcc, niagara4_patch
	 nop
	cmp	%g1, SUN4V_CHIP_SPARC_SN
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);
	if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
	    sun4v_chip_type == SUN4V_CHIP_SPARC_SN)

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
					 &__sun_m7_2insn_patch_end);
		break;
	default:
		break;
	}

	sun4v_hvapi_init();
}
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
			break;
		case SUN4V_CHIP_SPARC_M7:
		case SUN4V_CHIP_SPARC_SN:
		default:
			/* M7 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 49;
			break;
		case SUN4V_CHIP_SPARC_M8:
		default:
			/* M8 and later support 54-bit virtual addresses.
			 * However, restricting M8 and above VA bits to 53
			 * as 4-level page table cannot support more than
			 * 53 VA bits.
			 */
			sparc64_va_hole_top =    0xfff0000000000000UL;
			sparc64_va_hole_bottom = 0x0010000000000000UL;
			max_phys_bits = 51;
			break;
		}
	}

@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		pagecv_flag = 0x00;
		break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		page_cache4v_flag = _PAGE_CP_4V;
		break;
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
	tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
@@ -875,6 +875,56 @@ static void print_version(void)
		printk(KERN_INFO "%s", version);
}

struct vdc_check_port_data {
	int	dev_no;
	char	*type;
};

static int vdc_device_probed(struct device *dev, void *arg)
{
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vdc_check_port_data *port_data;

	port_data = (struct vdc_check_port_data *)arg;

	if ((vdev->dev_no == port_data->dev_no) &&
	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
		dev_get_drvdata(dev)) {
		/* This device has already been configured
		 * by vdc_port_probe()
		 */
		return 1;
	} else {
		return 0;
	}
}

/* Determine whether the VIO device is part of an mpgroup
 * by locating all the virtual-device-port nodes associated
 * with the parent virtual-device node for the VIO device
 * and checking whether any of these nodes are vdc-ports
 * which have already been configured.
 *
 * Returns true if this device is part of an mpgroup and has
 * already been probed.
 */
static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
{
	struct vdc_check_port_data port_data;
	struct device *dev;

	port_data.dev_no = vdev->dev_no;
	port_data.type = (char *)&vdev->type;

	dev = device_find_child(vdev->dev.parent, &port_data,
				vdc_device_probed);

	if (dev)
		return true;

	return false;
}

static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
		goto err_out_release_mdesc;
	}

	/* Check if this device is part of an mpgroup */
	if (vdc_port_mpgroup_check(vdev)) {
		printk(KERN_WARNING
			"VIO: Ignoring extra vdisk port %s",
			dev_name(&vdev->dev));
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	if (err)
		goto err_out_free_tx_ring;

	/* Note that the device driver_data is used to determine
	 * whether the port has been probed.
	 */
	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(compressor));
	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
{
	struct sync_file *sync_file = file->private_data;

	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
	if (test_bit(POLL_ENABLED, &sync_file->flags))
		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
	dma_fence_put(sync_file->fence);
	kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)

	poll_wait(file, &sync_file->wq, wait);

	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
	if (list_empty(&sync_file->cb.node) &&
	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
					   fence_check_cb_func) < 0)
			wake_up_all(&sync_file->wq);
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)

	/* port@2 is the output port */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
	if (ret)
	if (ret && ret != -ENODEV)
		return ret;

	/* Shut down GPIO is optional */
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		if (ret)
			return ret;

		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object", i);
		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

@@ -145,13 +145,19 @@ static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	int i;
	int ret;

	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
	for (i = 0; i < info->num_planes; i++) {
		unsigned int height = (i == 0) ? mode_cmd->height :
				     DIV_ROUND_UP(mode_cmd->height, info->vsub);
		unsigned long size = height * mode_cmd->pitches[i] +
				     mode_cmd->offsets[i];

		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
		if (!obj) {
			DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		}

		exynos_gem[i] = to_exynos_gem(obj);

		if (size > exynos_gem[i]->size) {
			i++;
			ret = -EINVAL;
			goto err;
		}
	}

	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
@@ -46,6 +46,8 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);

static int context_switch_events[] = {
	[RCS] = RCS_AS_CONTEXT_SWITCH,
	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_execlist *execlist =
		&vgpu->execlist[workload->ring_id];
	int ring_id = workload->ring_id;
	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
	struct intel_vgpu_workload *next_workload;
	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
	bool lite_restore = false;
	int ret;

@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (workload->status || vgpu->resetting)
	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
		/* if workload->status is not successful means HW GPU
		 * has occurred GPU hang or something wrong with i915/GVT,
		 * and GVT won't inject context switch interrupt to guest.
		 * So this error is a vGPU hang actually to the guest.
		 * According to this we should emunlate a vGPU hang. If
		 * there are pending workloads which are already submitted
		 * from guest, we should clean them up like HW GPU does.
		 *
		 * if it is in middle of engine resetting, the pending
		 * workloads won't be submitted to HW GPU and will be
		 * cleaned up during the resetting process later, so doing
		 * the workload clean up here doesn't have any impact.
		 **/
		clean_workloads(vgpu, ENGINE_MASK(ring_id));
		goto out;
	}

	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
	if (!list_empty(workload_q_head(vgpu, ring_id))) {
		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

		next_workload = container_of(next,
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
	struct intel_gvt_mmio_info *e;
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	int num = gvt->mmio.num_mmio_block;
	struct gvt_firmware_header *h;
	void *firmware;
	void *p;
	unsigned long size, crc32_start;
	int i;
	int i, j;
	int ret;

	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));

	for (i = 0; i < num; i++, block++) {
		for (j = 0; j < block->size; j += 4)
			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
							block->offset) + j));
	}

	memcpy(gvt->firmware.mmio, p, info->mmio_size);

	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
@@ -149,7 +149,7 @@ struct intel_vgpu {
	bool active;
	bool pv_notified;
	bool failsafe;
	bool resetting;
	unsigned int resetting_eng;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

@@ -195,6 +195,15 @@ struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t   offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned int num_tracked_mmio;
};
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
	return 0;
}

/* Special MMIO blocks. */
static struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t   offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
} gvt_mmio_blocks[] = {
	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
		pvinfo_mmio_read, pvinfo_mmio_write},
	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};

static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
					      unsigned int offset)
{
	unsigned long device = intel_gvt_get_device_type(gvt);
	struct gvt_mmio_block *block = gvt_mmio_blocks;
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	int num = gvt->mmio.num_mmio_block;
	int i;

	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
	for (i = 0; i < num; i++, block++) {
		if (!(device & block->device))
			continue;
		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
	gvt->mmio.mmio_attribute = NULL;
}

/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
		pvinfo_mmio_read, pvinfo_mmio_write},
	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};

/**
 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
 * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
			goto err;
	}

	gvt->mmio.mmio_block = mmio_blocks;
	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);

	gvt_dbg_mmio("traced %u virtual mmio registers\n",
		     gvt->mmio.num_tracked_mmio);
	return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
	gvt_mmio_func func;
	int ret;

	if (WARN_ON(bytes > 4))
	if (WARN_ON(bytes > 8))
		return -EINVAL;

	/*
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

		i915_gem_request_put(fetch_and_zero(&workload->req));

		if (!workload->status && !vgpu->resetting) {
		if (!workload->status && !(vgpu->resetting_eng &
					   ENGINE_MASK(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
	intel_vgpu_reset_execlist(vgpu, resetting_eng);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
		}
	}

	vgpu->resetting = false;
	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
		return true;

	case MUTEX_TRYLOCK_FAILED:
		*unlock = false;
		preempt_disable();
		do {
			cpu_relax();
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_SUCCESS:
				*unlock = true;
				return true;
				break;
			}
		} while (!need_resched());
		preempt_enable();
		return *unlock;

		return false;
	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	BUG();
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
	u32 *cs;
	int i;

	cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);

	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
		}

		/* Program the max register to clamp values > 1.0. */
		i = lut_size - 1;
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
			   drm_color_lut_extract(lut[i].red, 16));
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,

	if (i915.invert_brightness > 0 ||
	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
		return panel->backlight.max - val;
		return panel->backlight.max - val + panel->backlight.min;
	}

	return val;
@@ -5,7 +5,7 @@ config DRM_MSM
	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
	depends on OF && COMMON_CLK
	depends on MMU
	select QCOM_MDT_LOADER
	select QCOM_MDT_LOADER if ARCH_QCOM
	select REGULATOR
	select DRM_KMS_HELPER
	select DRM_PANEL
@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);

#define GPU_PAS_ID 13

#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)

static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
	const struct firmware *fw;
	struct device_node *np;
	struct resource r;
	phys_addr_t mem_phys;
	ssize_t mem_size;
	void *mem_region = NULL;
	int ret;

	if (!IS_ENABLED(CONFIG_ARCH_QCOM))
		return -EINVAL;

	np = of_get_child_by_name(dev->of_node, "zap-shader");
	if (!np)
		return -ENODEV;

	np = of_parse_phandle(np, "memory-region", 0);
	if (!np)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &r);
	if (ret)
		return ret;

	mem_phys = r.start;
	mem_size = resource_size(&r);

	/* Request the MDT file for the firmware */
	ret = request_firmware(&fw, fwname, dev);
	if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
	}

	/* Allocate memory for the firmware image */
	mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
	mem_region = memremap(mem_phys, mem_size,  MEMREMAP_WC);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
	if (mem_region)
		memunmap(mem_region);

	release_firmware(fw);

	return ret;
}
#else
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
	return -ENODEV;
}
#endif

static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	gpu->funcs->flush(gpu);
}

struct a5xx_hwcg {
static const struct {
	u32 offset;
	u32 value;
};

static const struct a5xx_hwcg a530_hwcg[] = {
} a5xx_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct {
	int (*test)(struct adreno_gpu *gpu);
	const struct a5xx_hwcg *regs;
	unsigned int count;
} a5xx_hwcg_regs[] = {
	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
};

static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
		const struct a5xx_hwcg *regs, unsigned int count)
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		gpu_write(gpu, regs[i].offset, regs[i].value);
	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
		gpu_write(gpu, a5xx_hwcg[i].offset,
			state ? a5xx_hwcg[i].value : 0);

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
}

static void a5xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
				a5xx_hwcg_regs[i].count);
			return;
		}
	}
	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}

static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
	return ret;
}

/* Set up a child device to "own" the zap shader */
static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
{
	struct device_node *node;
	int ret;

	if (dev->parent)
		return 0;

	/* Find the sub-node for the zap shader */
	node = of_get_child_by_name(parent->of_node, "zap-shader");
	if (!node) {
		DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
		return -ENODEV;
	}

	dev->parent = parent;
	dev->of_node = node;
	dev_set_name(dev, "adreno_zap_shader");

	ret = device_register(dev);
	if (ret) {
		DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
		goto out;
	}

	ret = of_reserved_mem_device_init(dev);
	if (ret) {
		DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
		device_unregister(dev);
	}

out:
	if (ret)
		dev->parent = NULL;

	return ret;
}

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
		return -ENODEV;
	}

	ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);

	if (!ret)
		ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
			adreno_gpu->info->zapfw);
	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);

	loaded = !ret;

@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

	/* Enable HWCG */
	a5xx_enable_hwcg(gpu);
	a5xx_set_hwcg(gpu, true);

	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)

	DBG("%s", gpu->name);

	if (a5xx_gpu->zap_dev.parent)
		device_unregister(&a5xx_gpu->zap_dev);

	if (a5xx_gpu->pm4_bo) {
		if (a5xx_gpu->pm4_iova)
			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
	~0
	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
	0xB9A0, 0xB9BF, ~0
};

static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "status:   %08x\n",
			gpu_read(gpu, REG_A5XX_RBBM_STATUS));

	/*
	 * Temporarily disable hardware clock gating before going into
	 * adreno_show to avoid issues while reading the registers
	 */
	a5xx_set_hwcg(gpu, false);
	adreno_show(gpu, m);
	a5xx_set_hwcg(gpu, true);
}
#endif

@@ -36,8 +36,6 @@ struct a5xx_gpu {
	uint32_t gpmu_dwords;

	uint32_t lm_leakage;

	struct device zap_dev;
};

#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
}

bool a5xx_idle(struct msm_gpu *gpu);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);

#endif /* __A5XX_GPU_H__ */
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp)
			return adreno_gpu->funcs->get_timestamp(gpu, value);
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
	struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		return;
	}

	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
	clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
					struct drm_display_mode *mode)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (msm_host->mode) {
		drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
		return -ENOMEM;
	}

	ret = dsi_calc_clk_rate(msm_host);
	if (ret) {
		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
		return ret;
	}

	return 0;
}

			@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
 | 
			
		|||
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 | 
			
		||||
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 | 
			
		||||
	unsigned long flags;
 | 
			
		||||
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
 | 
			
		||||
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
 | 
			
		||||
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 | 
			
		||||
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 | 
			
		||||
	int i, plane_cnt = 0;
 | 
			
		||||
	bool bg_alpha_enabled = false;
 | 
			
		||||
	u32 mixer_op_mode = 0;
 | 
			
		||||
| 
						 | 
				
			
			@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 | 
			
		|||
	if (!handle) {
 | 
			
		||||
		DBG("Cursor off");
 | 
			
		||||
		cursor_enable = false;
 | 
			
		||||
		mdp5_enable(mdp5_kms);
 | 
			
		||||
		goto set_cursor;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 | 
			
		|||
 | 
			
		||||
	get_roi(crtc, &roi_w, &roi_h);
 | 
			
		||||
 | 
			
		||||
	mdp5_enable(mdp5_kms);
 | 
			
		||||
 | 
			
		||||
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 | 
			
		||||
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
 | 
			
		||||
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
 | 
			
		||||
| 
						 | 
				
			
			@ -804,6 +807,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 | 
			
		|||
	crtc_flush(crtc, flush_mask);
 | 
			
		||||
 | 
			
		||||
end:
 | 
			
		||||
	mdp5_disable(mdp5_kms);
 | 
			
		||||
	if (old_bo) {
 | 
			
		||||
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 | 
			
		||||
		/* enable vblank to complete cursor work: */
 | 
			
		||||
| 
						 | 
				
			
			@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 | 
			
		|||
 | 
			
		||||
	get_roi(crtc, &roi_w, &roi_h);
 | 
			
		||||
 | 
			
		||||
	mdp5_enable(mdp5_kms);
 | 
			
		||||
 | 
			
		||||
	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 | 
			
		||||
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 | 
			
		||||
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
 | 
			
		||||
| 
						 | 
				
			
			@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 | 
			
		|||
 | 
			
		||||
	crtc_flush(crtc, flush_mask);
 | 
			
		||||
 | 
			
		||||
	mdp5_disable(mdp5_kms);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||

@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
	struct mdp5_interface *intf = mdp5_encoder->intf;

	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
		mdp5_cmd_encoder_disable(encoder);
		mdp5_cmd_encoder_enable(encoder);
	else
		mdp5_vid_encoder_enable(encoder);
}

@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
		const char *name, bool mandatory)
 | 
			
		||||
{
 | 
			
		||||
	struct device *dev = &pdev->dev;
 | 
			
		||||
	struct clk *clk = devm_clk_get(dev, name);
 | 
			
		||||
	struct clk *clk = msm_clk_get(pdev, name);
 | 
			
		||||
	if (IS_ERR(clk) && mandatory) {
 | 
			
		||||
		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 | 
			
		||||
		return PTR_ERR(clk);
 | 
			
		||||
| 
						 | 
				
			
			@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	/* mandatory clocks: */
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		goto fail;
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		goto fail;
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		goto fail;
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
 | 
			
		||||
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		goto fail;
 | 
			
		||||
 | 
			
		||||
	/* optional clocks: */
 | 
			
		||||
	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
 | 
			
		||||
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 | 
			
		||||
 | 
			
		||||
	/* we need to set a default rate before enabling.  Set a safe
 | 
			
		||||
	 * rate first, then figure out hw revision, and then set a
 | 
			
		||||

@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
	struct mdp5_hw_pipe *right_hwpipe;
 | 
			
		||||
	const struct mdp_format *format;
 | 
			
		||||
	uint32_t nplanes, config = 0;
 | 
			
		||||
	struct phase_step step = { 0 };
 | 
			
		||||
	struct pixel_ext pe = { 0 };
 | 
			
		||||
	struct phase_step step = { { 0 } };
 | 
			
		||||
	struct pixel_ext pe = { { 0 } };
 | 
			
		||||
	uint32_t hdecm = 0, vdecm = 0;
 | 
			
		||||
	uint32_t pix_format;
 | 
			
		||||
	unsigned int rotation;
 | 
			
		||||

@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
		struct page **pages;
 | 
			
		||||
 | 
			
		||||
		vma = add_vma(obj, aspace);
 | 
			
		||||
		if (IS_ERR(vma))
 | 
			
		||||
			return PTR_ERR(vma);
 | 
			
		||||
		if (IS_ERR(vma)) {
 | 
			
		||||
			ret = PTR_ERR(vma);
 | 
			
		||||
			goto unlock;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		pages = get_pages(obj);
 | 
			
		||||
		if (IS_ERR(pages)) {
 | 
			
		||||
| 
						 | 
				
			
			@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 | 
			
		|||
 | 
			
		||||
fail:
 | 
			
		||||
	del_vma(vma);
 | 
			
		||||
 | 
			
		||||
unlock:
 | 
			
		||||
	mutex_unlock(&msm_obj->lock);
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 | 
			
		|||
	if (use_vram) {
 | 
			
		||||
		struct msm_gem_vma *vma;
 | 
			
		||||
		struct page **pages;
 | 
			
		||||
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
 | 
			
		||||
 | 
			
		||||
		mutex_lock(&msm_obj->lock);
 | 
			
		||||
 | 
			
		||||
		vma = add_vma(obj, NULL);
 | 
			
		||||
		mutex_unlock(&msm_obj->lock);
 | 
			
		||||
		if (IS_ERR(vma)) {
 | 
			
		||||
			ret = PTR_ERR(vma);
 | 
			
		||||
			goto fail;
 | 
			
		||||

@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 | 
			
		||||
{
 | 
			
		||||
	struct msm_gem_submit *submit;
 | 
			
		||||
	uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
 | 
			
		||||
		(nr_cmds * sizeof(submit->cmd[0]));
 | 
			
		||||
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
 | 
			
		||||
		((u64)nr_cmds * sizeof(submit->cmd[0]));
 | 
			
		||||
 | 
			
		||||
	if (sz > SIZE_MAX)
 | 
			
		||||
		return NULL;
 | 
			
		||||
| 
						 | 
				
			
			@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 | 
			
		|||
	if (ret)
 | 
			
		||||
		goto out;
 | 
			
		||||
 | 
			
		||||
	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
 | 
			
		||||
	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
 | 
			
		||||
		ret = submit_fence_sync(submit);
 | 
			
		||||
		if (ret)
 | 
			
		||||
			goto out;
 | 
			
		||||

@@ -42,7 +42,7 @@ void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 | 
			
		||||
		struct msm_gem_vma *vma, struct sg_table *sgt)
 | 
			
		||||
{
 | 
			
		||||
	if (!vma->iova)
 | 
			
		||||
	if (!aspace || !vma->iova)
 | 
			
		||||
		return;
 | 
			
		||||
 | 
			
		||||
	if (aspace->mmu) {
 | 
			
		||||

@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
	/* Create output path objects for each VBIOS display path. */
 | 
			
		||||
	i = -1;
 | 
			
		||||
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
 | 
			
		||||
		if (ver < 0x40) /* No support for chipsets prior to NV50. */
 | 
			
		||||
			break;
 | 
			
		||||
		if (dcbE.type == DCB_OUTPUT_UNUSED)
 | 
			
		||||
			continue;
 | 
			
		||||
		if (dcbE.type == DCB_OUTPUT_EOL)
 | 
			
		||||

@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
static int vop_enable(struct drm_crtc *crtc)
 | 
			
		||||
{
 | 
			
		||||
	struct vop *vop = to_vop(crtc);
 | 
			
		||||
	int ret;
 | 
			
		||||
	int ret, i;
 | 
			
		||||
 | 
			
		||||
	ret = pm_runtime_get_sync(vop->dev);
 | 
			
		||||
	if (ret < 0) {
 | 
			
		||||
| 
						 | 
				
			
			@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	memcpy(vop->regs, vop->regsbak, vop->len);
 | 
			
		||||
	/*
 | 
			
		||||
	 * We need to make sure that all windows are disabled before we
 | 
			
		||||
	 * enable the crtc. Otherwise we might try to scan from a destroyed
 | 
			
		||||
	 * buffer later.
 | 
			
		||||
	 */
 | 
			
		||||
	for (i = 0; i < vop->data->win_size; i++) {
 | 
			
		||||
		struct vop_win *vop_win = &vop->win[i];
 | 
			
		||||
		const struct vop_win_data *win = vop_win->data;
 | 
			
		||||
 | 
			
		||||
		spin_lock(&vop->reg_lock);
 | 
			
		||||
		VOP_WIN_SET(vop, win, enable, 0);
 | 
			
		||||
		spin_unlock(&vop->reg_lock);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vop_cfg_done(vop);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			@ -566,28 +580,11 @@ static int vop_enable(struct drm_crtc *crtc)
 | 
			
		|||
static void vop_crtc_disable(struct drm_crtc *crtc)
 | 
			
		||||
{
 | 
			
		||||
	struct vop *vop = to_vop(crtc);
 | 
			
		||||
	int i;
 | 
			
		||||
 | 
			
		||||
	WARN_ON(vop->event);
 | 
			
		||||
 | 
			
		||||
	rockchip_drm_psr_deactivate(&vop->crtc);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * We need to make sure that all windows are disabled before we
 | 
			
		||||
	 * disable that crtc. Otherwise we might try to scan from a destroyed
 | 
			
		||||
	 * buffer later.
 | 
			
		||||
	 */
 | 
			
		||||
	for (i = 0; i < vop->data->win_size; i++) {
 | 
			
		||||
		struct vop_win *vop_win = &vop->win[i];
 | 
			
		||||
		const struct vop_win_data *win = vop_win->data;
 | 
			
		||||
 | 
			
		||||
		spin_lock(&vop->reg_lock);
 | 
			
		||||
		VOP_WIN_SET(vop, win, enable, 0);
 | 
			
		||||
		spin_unlock(&vop->reg_lock);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vop_cfg_done(vop);
 | 
			
		||||
 | 
			
		||||
	drm_crtc_vblank_off(crtc);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 | 
			
		|||
	 * Src.x1 can be odd when do clip, but yuv plane start point
 | 
			
		||||
	 * need align with 2 pixel.
 | 
			
		||||
	 */
 | 
			
		||||
	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
 | 
			
		||||
	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
 | 
			
		||||
		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 | 
			
		|||
	spin_lock(&vop->reg_lock);
 | 
			
		||||
 | 
			
		||||
	VOP_WIN_SET(vop, win, format, format);
 | 
			
		||||
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
 | 
			
		||||
	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
 | 
			
		||||
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 | 
			
		||||
	if (is_yuv_support(fb->format->format)) {
 | 
			
		||||
		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
 | 
			
		||||
| 
						 | 
				
			
			@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 | 
			
		|||
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 | 
			
		||||
 | 
			
		||||
		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
 | 
			
		||||
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
 | 
			
		||||
		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
 | 
			
		||||
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||

@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 | 
			
		||||
	act_height = (src_h + vskiplines - 1) / vskiplines;
 | 
			
		||||
 | 
			
		||||
	if (act_height == dst_h)
 | 
			
		||||
		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
 | 
			
		||||
 | 
			
		||||
	return GET_SCL_FT_BILI_DN(act_height, dst_h);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||

@@ -7,7 +7,6 @@ config DRM_STM
	select DRM_PANEL
	select VIDEOMODE_HELPERS
	select FB_PROVIDE_GET_FB_UNMAPPED_AREA
	default y

	help
	  Enable support for the on-chip display controller on

@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
			p = (char *)&dev->stats;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;

		p += s->stat_offset;
		data[j] = *(unsigned long *)p;
		j++;

@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
	if (data[IFLA_GENEVE_ID]) {
		__u32 vni =  nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
		if (vni >= GENEVE_N_VID)
			return -ERANGE;
	}

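
The bound matters because a Geneve VNI is a 24-bit value. Assuming the usual macro definitions from drivers/net/geneve.c (reproduced below as an assumption, not quoted from this diff), comparing against the mask rejected the highest valid VNI, while comparing against the count accepts the full 0..0xFFFFFF range:

/* Assumed definitions, for illustration only: */
#define GENEVE_N_VID		(1u << 24)		/* number of possible VNIs */
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)	/* highest valid VNI: 0xFFFFFF */

static int geneve_vni_valid(unsigned int vni)
{
	return vni < GENEVE_N_VID;	/* 0xFFFFFF is accepted, 0x1000000 is not */
}
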

@@ -4259,6 +4259,41 @@ int pci_reset_function(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_reset_function);

/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function() in that it saves and restores device state
 * over the reset.  It also differs from pci_reset_function() in that it
 * requires the PCI device lock to be held.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function_locked(struct pci_dev *dev)
{
	int rc;

	rc = pci_probe_reset_function(dev);
	if (rc)
		return rc;

	pci_dev_save_and_disable(dev);

	rc = __pci_reset_function_locked(dev);

	pci_dev_restore(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function_locked);

/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
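
A minimal sketch of a caller for the new export, assuming the "PCI device lock" in the kernel-doc is the struct device lock that probe paths already hold (which is what the xhci-pci hunk later in this diff relies on); this is an illustration, not code from the patch:

#include <linux/device.h>
#include <linux/pci.h>

static int reset_single_function(struct pci_dev *pdev)
{
	int rc;

	device_lock(&pdev->dev);		/* assumed to satisfy the locking requirement */
	rc = pci_reset_function_locked(pdev);	/* saves state, resets, restores */
	device_unlock(&pdev->dev);

	return rc;
}
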

@@ -1150,3 +1150,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);

bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
{
	/*
	 * Our dear uPD72020{1,2} friend only partially resets when
	 * asked to via the XHCI interface, and may end up doing DMA
	 * at the wrong addresses, as it keeps the top 32bit of some
	 * addresses from its previous programming under obscure
	 * circumstances.
	 * Give it a good wack at probe time. Unfortunately, this
	 * needs to happen before we've had a chance to discover any
	 * quirk, or the system will be in a rather bad state.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
	    (pdev->device == 0x0014 || pdev->device == 0x0015))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);

@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
#else
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}

@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)

	driver = (struct hc_driver *)id->driver_data;

	/* For some HW implementation, a XHCI reset is just not enough... */
	if (usb_xhci_needs_pci_reset(dev)) {
		dev_info(&dev->dev, "Resetting\n");
		if (pci_reset_function_locked(dev))
			dev_warn(&dev->dev, "Reset failed");
	}

	/* Prevent runtime suspending between USB-2 and USB-3 initialization */
	pm_runtime_get_noresume(&dev->dev);


@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
		    global_node_page_state(NR_FILE_MAPPED));
	show_val_kb(m, "Shmem:          ", i.sharedram);
	show_val_kb(m, "Slab:           ",
		    global_page_state(NR_SLAB_RECLAIMABLE) +
		    global_page_state(NR_SLAB_UNRECLAIMABLE));
		    global_node_page_state(NR_SLAB_RECLAIMABLE) +
		    global_node_page_state(NR_SLAB_UNRECLAIMABLE));

	show_val_kb(m, "SReclaimable:   ",
		    global_page_state(NR_SLAB_RECLAIMABLE));
		    global_node_page_state(NR_SLAB_RECLAIMABLE));
	show_val_kb(m, "SUnreclaim:     ",
		    global_page_state(NR_SLAB_UNRECLAIMABLE));
		    global_node_page_state(NR_SLAB_UNRECLAIMABLE));
	seq_printf(m, "KernelStack:    %8lu kB\n",
		   global_page_state(NR_KERNEL_STACK_KB));
	show_val_kb(m, "PageTables:     ",

@@ -16,9 +16,10 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>

#include <asm/elf.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	struct mmu_gather tlb;
	int itype;
	int rv;

@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
		}

		down_read(&mm->mmap_sem);
		tlb_gather_mmu(&tlb, mm, 0, -1);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))

@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		tlb_finish_mmu(&tlb, 0, -1);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);

@@ -1597,7 +1597,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
				   uffdio_copy.len);
		mmput(ctx->mm);
	} else {
		return -ENOSPC;
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
		return -EFAULT;

@@ -1644,7 +1644,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
				     uffdio_zeropage.range.len);
		mmput(ctx->mm);
	} else {
		return -ENOSPC;
		return -ESRCH;
	}
	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
		return -EFAULT;

@@ -112,10 +112,11 @@ struct mmu_gather {

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
			 unsigned long start, unsigned long end, bool force);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);


@@ -487,14 +487,12 @@ struct mm_struct {
	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
#endif
	atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;

@@ -522,12 +520,18 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
	return mm->cpu_vm_mask_var;
}

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end);

/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 * The barriers are used to ensure the order between tlb_flush_pending updates,
 * which happen while the lock is not taken, and the PTE updates, which happen
 * while the lock is taken, are serialized.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{

@@ -535,11 +539,26 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
	 * Must be called with PTL held; such that our PTL acquire will have
	 * observed the store from set_tlb_flush_pending().
	 */
	return mm->tlb_flush_pending;
	return atomic_read(&mm->tlb_flush_pending) > 0;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)

/*
 * Returns true if there are two above TLB batching threads in parallel.
 */
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);

	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which

@@ -547,7 +566,7 @@ static inline void set_tlb_flush_pending(struct mm_struct *mm)
	 *
	 * So the ordering here is:
	 *
	 *	mm->tlb_flush_pending = true;
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();

@@ -559,30 +578,25 @@ static inline void set_tlb_flush_pending(struct mm_struct *mm)
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	mm->tlb_flush_pending = false;
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * So the =true store is constrained by the PTL unlock, and the =false
	 * store is constrained by the TLB invalidate.
	 */
}

/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/* see set_tlb_flush_pending */
	mm->tlb_flush_pending = false;
	/*
	 * Guarantee that the tlb_flush_pending does not not leak into the
	 * critical section, since we must order the PTE change and changes to
	 * the pending TLB flush indication. We could have relied on TLB flush
	 * as a memory barrier, but this behavior is not clearly documented.
	 */
	smp_mb__before_atomic();
	atomic_dec(&mm->tlb_flush_pending);
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif

struct vm_fault;

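
Taken together, these hunks replace the tlb_flush_pending boolean with a counter so that several batched-flush sections can be in flight at once. A minimal sketch of the calling pattern the new helpers imply, mirroring the mm/mprotect.c hunk later in this diff; an illustration, not kernel code:

/* Sketch only: change_range() stands in for a real PTE-changing path. */
static void change_range(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	inc_tlb_flush_pending(mm);	/* visible to PTL holders before any PTE change */

	/* ... modify page table entries under the page table lock ... */

	flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);	/* only after the TLB invalidate */
}
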

@@ -1067,6 +1067,7 @@ void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_reset_slot(struct pci_slot *slot);

@@ -43,12 +43,13 @@ struct sync_file {
#endif

	wait_queue_head_t	wq;
	unsigned long		flags;

	struct dma_fence	*fence;
	struct dma_fence_cb cb;
};

#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
#define POLL_ENABLED 0

struct sync_file *sync_file_create(struct dma_fence *fence);
struct dma_fence *sync_file_get_fence(int fd);

@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
	__u32 size;           /* in, cmdstream size */
	__u32 pad;
	__u32 nr_relocs;      /* in, number of submit_reloc's */
	__u64 __user relocs;  /* in, ptr to array of submit_reloc's */
	__u64 relocs;         /* in, ptr to array of submit_reloc's */
};

/* Each buffer referenced elsewhere in the cmdstream submit (ie. the

@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
	__u32 fence;          /* out */
	__u32 nr_bos;         /* in, number of submit_bo's */
	__u32 nr_cmds;        /* in, number of submit_cmd's */
	__u64 __user bos;     /* in, ptr to array of submit_bo's */
	__u64 __user cmds;    /* in, ptr to array of submit_cmd's */
	__u64 bos;            /* in, ptr to array of submit_bo's */
	__u64 cmds;           /* in, ptr to array of submit_cmd's */
	__s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
};

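
Dropping __user from a __u64 UAPI field is the usual pattern: the field carries a user pointer as a plain integer, and the kernel converts it explicitly before touching it. A hedged sketch of that conversion, not msm's actual submit path:

#include <linux/kernel.h>	/* u64_to_user_ptr() */
#include <linux/uaccess.h>

static int copy_table_from_user(void *dst, u64 uapi_ptr, size_t len)
{
	/* Turn the plain integer from the UAPI struct back into a __user pointer. */
	void __user *src = u64_to_user_ptr(uapi_ptr);

	if (copy_from_user(dst, src, len))
		return -EFAULT;

	return 0;
}
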

@@ -809,7 +809,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif

@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)

@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
	if (in_task()) {
		unsigned int fail_nth = READ_ONCE(current->fail_nth);

		if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
			goto fail;
		if (fail_nth) {
			if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
				goto fail;

		return false;
			return false;
		}
	}

	/* No need to check any other properties if the probability is 0 */
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
 | 
			
		|||
				config->test_driver);
 | 
			
		||||
	else
 | 
			
		||||
		len += snprintf(buf+len, PAGE_SIZE - len,
 | 
			
		||||
				"driver:\tEMTPY\n");
 | 
			
		||||
				"driver:\tEMPTY\n");
 | 
			
		||||
 | 
			
		||||
	if (config->test_fs)
 | 
			
		||||
		len += snprintf(buf+len, PAGE_SIZE - len,
 | 
			
		||||
| 
						 | 
				
			
			@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
 | 
			
		|||
				config->test_fs);
 | 
			
		||||
	else
 | 
			
		||||
		len += snprintf(buf+len, PAGE_SIZE - len,
 | 
			
		||||
				"fs:\tEMTPY\n");
 | 
			
		||||
				"fs:\tEMPTY\n");
 | 
			
		||||
 | 
			
		||||
	mutex_unlock(&test_dev->config_mutex);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
 | 
			
		|||
						      strlen(test_str));
 | 
			
		||||
		break;
 | 
			
		||||
	case TEST_KMOD_FS_TYPE:
 | 
			
		||||
		break;
 | 
			
		||||
		kfree_const(config->test_fs);
 | 
			
		||||
		config->test_driver = NULL;
 | 
			
		||||
		copied = config_copy_test_fs(config, test_str,
 | 
			
		||||
					     strlen(test_str));
 | 
			
		||||
		break;
 | 
			
		||||
	default:
 | 
			
		||||
		mutex_unlock(&test_dev->config_mutex);
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
| 
						 | 
				
			
			@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
 | 
			
		|||
					    int (*test_sync)(struct kmod_test_device *test_dev))
 | 
			
		||||
{
 | 
			
		||||
	int ret;
 | 
			
		||||
	long new;
 | 
			
		||||
	unsigned long new;
 | 
			
		||||
	unsigned int old_val;
 | 
			
		||||
 | 
			
		||||
	ret = kstrtol(buf, 10, &new);
 | 
			
		||||
	ret = kstrtoul(buf, 10, &new);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		return ret;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
 | 
			
		|||
					     unsigned int max)
 | 
			
		||||
{
 | 
			
		||||
	int ret;
 | 
			
		||||
	long new;
 | 
			
		||||
	unsigned long new;
 | 
			
		||||
 | 
			
		||||
	ret = kstrtol(buf, 10, &new);
 | 
			
		||||
	ret = kstrtoul(buf, 10, &new);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		return ret;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 | 
			
		|||
	struct kmod_test_device *test_dev = NULL;
 | 
			
		||||
	int ret;
 | 
			
		||||
 | 
			
		||||
	mutex_unlock(&reg_dev_mutex);
	mutex_lock(&reg_dev_mutex);
 | 
			
		||||
 | 
			
		||||
	/* int should suffice for number of devices, test for wrap */
 | 
			
		||||
	if (unlikely(num_test_devs + 1) < 0) {
 | 
			
		||||

@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);
				       __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!page)
		return NULL;


@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		"tlb_flush_pending %d\n"
#endif
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,

@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		mm->tlb_flush_pending,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

@@ -1496,6 +1496,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
		goto clear_pmdnuma;
	}

	/*
	 * The page_table_lock above provides a memory barrier
	 * with change_protection_range.
	 */
	if (mm_tlb_flush_pending(vma->vm_mm))
		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);

	/*
	 * Since we took the NUMA fault, we must have observed the !accessible
	 * bit. Make sure all other CPUs agree with that, to avoid them

@@ -4062,9 +4062,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
	return ret;
out_release_unlock:
	spin_unlock(ptl);
out_release_nounlock:
	if (vm_shared)
		unlock_page(page);
out_release_nounlock:
	put_page(page);
	goto out;
}

mm/ksm.c (3 lines changed)

@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
		goto out_unlock;

	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
						mm_tlb_flush_pending(mm)) {
		pte_t entry;

		swapped = PageSwapCache(page);

mm/memory.c (42 lines changed)

@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
	return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* tlb_gather_mmu
 | 
			
		||||
 *	Called to initialize an (on-stack) mmu_gather structure for page-table
 | 
			
		||||
 *	tear-down from @mm. The @fullmm argument is used when @mm is without
 | 
			
		||||
 *	users and we're going to destroy the full address space (exit/execve).
 | 
			
		||||
 */
 | 
			
		||||
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 | 
			
		||||
void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 | 
			
		||||
				unsigned long start, unsigned long end)
 | 
			
		||||
{
 | 
			
		||||
	tlb->mm = mm;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -275,10 +271,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 | 
			
		|||
 *	Called at the end of the shootdown operation to free up any resources
 | 
			
		||||
 *	that were required.
 | 
			
		||||
 */
 | 
			
		||||
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 | 
			
		||||
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 | 
			
		||||
		unsigned long start, unsigned long end, bool force)
 | 
			
		||||
{
 | 
			
		||||
	struct mmu_gather_batch *batch, *next;
 | 
			
		||||
 | 
			
		||||
	if (force)
 | 
			
		||||
		__tlb_adjust_range(tlb, start, end - start);
 | 
			
		||||
 | 
			
		||||
	tlb_flush_mmu(tlb);
 | 
			
		||||
 | 
			
		||||
	/* keep the page table cache within bounds */
 | 
			
		||||

@@ -398,6 +398,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/* tlb_gather_mmu
 *	Called to initialize an (on-stack) mmu_gather structure for page-table
 *	tear-down from @mm. The @fullmm argument is used when @mm is without
 *	users and we're going to destroy the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	arch_tlb_gather_mmu(tlb, mm, start, end);
	inc_tlb_flush_pending(tlb->mm);
}

void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If there are parallel threads are doing PTE changes on same range
	 * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
	 * flush by batching, a thread has stable TLB entry can fail to flush
	 * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
	 * forcefully if we detect parallel PTE batching threads.
	 */
	bool force = mm_tlb_flush_nested(tlb->mm);

	arch_tlb_finish_mmu(tlb, start, end, force);
	dec_tlb_flush_pending(tlb->mm);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
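
A minimal sketch of a caller of these wrappers, assuming the unmap_vmas() pattern used by teardown paths such as zap_page_range(); every such section now counts itself in mm->tlb_flush_pending, which is what lets a concurrent tlb_finish_mmu() detect nesting and force a flush:

static void unmap_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* inc_tlb_flush_pending() inside */
	unmap_vmas(&tlb, vma, start, end);	/* queue pages, flushing in batches */
	tlb_finish_mmu(&tlb, start, end);	/* dec_tlb_flush_pending() inside */
}
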

@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))

@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);
	dec_tlb_flush_pending(mm);

	return pages;
}

@@ -4501,8 +4501,9 @@ long si_mem_available(void)
	 * Part of the reclaimable slab consists of items that are in use,
	 * and cannot be freed. Cap this estimate at the low watermark.
	 */
	available += global_page_state(NR_SLAB_RECLAIMABLE) -
		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
		     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
			 wmark_low);

	if (available < 0)
		available = 0;

@@ -4645,8 +4646,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_SLAB_RECLAIMABLE),
		global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_node_page_state(NR_SLAB_RECLAIMABLE),
		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),

@@ -7711,7 +7712,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info("%s: [%lx, %lx) PFNs busy\n",
		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;

mm/rmap.c (52 lines changed)

@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
		.flags = PVMW_SYNC,
 | 
			
		||||
	};
 | 
			
		||||
	int *cleaned = arg;
 | 
			
		||||
	bool invalidation_needed = false;
 | 
			
		||||
 | 
			
		||||
	while (page_vma_mapped_walk(&pvmw)) {
 | 
			
		||||
		int ret = 0;
 | 
			
		||||
		address = pvmw.address;
 | 
			
		||||
		if (pvmw.pte) {
 | 
			
		||||
			pte_t entry;
 | 
			
		||||
			pte_t *pte = pvmw.pte;
 | 
			
		||||
| 
						 | 
				
			
			@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			if (!pte_dirty(*pte) && !pte_write(*pte))
 | 
			
		||||
				continue;
 | 
			
		||||
 | 
			
		||||
			flush_cache_page(vma, address, pte_pfn(*pte));
 | 
			
		||||
			entry = ptep_clear_flush(vma, address, pte);
 | 
			
		||||
			flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
 | 
			
		||||
			entry = ptep_clear_flush(vma, pvmw.address, pte);
 | 
			
		||||
			entry = pte_wrprotect(entry);
 | 
			
		||||
			entry = pte_mkclean(entry);
 | 
			
		||||
			set_pte_at(vma->vm_mm, address, pte, entry);
 | 
			
		||||
			set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
 | 
			
		||||
			ret = 1;
 | 
			
		||||
		} else {
 | 
			
		||||
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 | 
			
		||||
| 
						 | 
				
			
			@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 | 
			
		||||
				continue;
 | 
			
		||||
 | 
			
		||||
			flush_cache_page(vma, address, page_to_pfn(page));
 | 
			
		||||
			entry = pmdp_huge_clear_flush(vma, address, pmd);
 | 
			
		||||
			flush_cache_page(vma, pvmw.address, page_to_pfn(page));
 | 
			
		||||
			entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
 | 
			
		||||
			entry = pmd_wrprotect(entry);
 | 
			
		||||
			entry = pmd_mkclean(entry);
 | 
			
		||||
			set_pmd_at(vma->vm_mm, address, pmd, entry);
 | 
			
		||||
			set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
 | 
			
		||||
			ret = 1;
 | 
			
		||||
#else
 | 
			
		||||
			/* unexpected pmd-mapped page? */
 | 
			
		||||
| 
						 | 
				
			
			@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
		}
 | 
			
		||||
 | 
			
		||||
		if (ret) {
 | 
			
		||||
			mmu_notifier_invalidate_page(vma->vm_mm, address);
 | 
			
		||||
			(*cleaned)++;
 | 
			
		||||
			invalidation_needed = true;
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (invalidation_needed) {
 | 
			
		||||
		mmu_notifier_invalidate_range(vma->vm_mm, address,
 | 
			
		||||
				address + (1UL << compound_order(page)));
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return true;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
	};
 | 
			
		||||
	pte_t pteval;
 | 
			
		||||
	struct page *subpage;
 | 
			
		||||
	bool ret = true;
 | 
			
		||||
	bool ret = true, invalidation_needed = false;
 | 
			
		||||
	enum ttu_flags flags = (enum ttu_flags)arg;
 | 
			
		||||
 | 
			
		||||
	/* munlock has nothing to gain from examining un-locked vmas */
 | 
			
		||||
| 
						 | 
				
			
			@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
		VM_BUG_ON_PAGE(!pvmw.pte, page);
 | 
			
		||||
 | 
			
		||||
		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
 | 
			
		||||
		address = pvmw.address;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
		if (!(flags & TTU_IGNORE_ACCESS)) {
 | 
			
		||||
			if (ptep_clear_flush_young_notify(vma, address,
 | 
			
		||||
			if (ptep_clear_flush_young_notify(vma, pvmw.address,
 | 
			
		||||
						pvmw.pte)) {
 | 
			
		||||
				ret = false;
 | 
			
		||||
				page_vma_mapped_walk_done(&pvmw);
 | 
			
		||||
| 
						 | 
				
			
			@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
		}
 | 
			
		||||
 | 
			
		||||
		/* Nuke the page table entry. */
 | 
			
		||||
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 | 
			
		||||
		flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
 | 
			
		||||
		if (should_defer_flush(mm, flags)) {
 | 
			
		||||
			/*
 | 
			
		||||
			 * We clear the PTE but do not flush so potentially
 | 
			
		||||
| 
						 | 
				
			
			@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			 * transition on a cached TLB entry is written through
 | 
			
		||||
			 * and traps if the PTE is unmapped.
 | 
			
		||||
			 */
 | 
			
		||||
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 | 
			
		||||
			pteval = ptep_get_and_clear(mm, pvmw.address,
 | 
			
		||||
						    pvmw.pte);
 | 
			
		||||
 | 
			
		||||
			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 | 
			
		||||
		} else {
 | 
			
		||||
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
 | 
			
		||||
			pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		/* Move the dirty bit to the page. Now the pte is gone. */
 | 
			
		||||
| 
						 | 
				
			
			@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			if (PageHuge(page)) {
 | 
			
		||||
				int nr = 1 << compound_order(page);
 | 
			
		||||
				hugetlb_count_sub(nr, mm);
 | 
			
		||||
				set_huge_swap_pte_at(mm, address,
 | 
			
		||||
				set_huge_swap_pte_at(mm, pvmw.address,
 | 
			
		||||
						     pvmw.pte, pteval,
 | 
			
		||||
						     vma_mmu_pagesize(vma));
 | 
			
		||||
			} else {
 | 
			
		||||
				dec_mm_counter(mm, mm_counter(page));
 | 
			
		||||
				set_pte_at(mm, address, pvmw.pte, pteval);
 | 
			
		||||
				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
		} else if (pte_unused(pteval)) {
 | 
			
		||||
| 
						 | 
				
			
			@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			swp_pte = swp_entry_to_pte(entry);
 | 
			
		||||
			if (pte_soft_dirty(pteval))
 | 
			
		||||
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
 | 
			
		||||
			set_pte_at(mm, address, pvmw.pte, swp_pte);
 | 
			
		||||
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 | 
			
		||||
		} else if (PageAnon(page)) {
 | 
			
		||||
			swp_entry_t entry = { .val = page_private(subpage) };
 | 
			
		||||
			pte_t swp_pte;
 | 
			
		||||
| 
						 | 
				
			
			@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
				 * If the page was redirtied, it cannot be
 | 
			
		||||
				 * discarded. Remap the page to page table.
 | 
			
		||||
				 */
 | 
			
		||||
				set_pte_at(mm, address, pvmw.pte, pteval);
 | 
			
		||||
				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
 | 
			
		||||
				SetPageSwapBacked(page);
 | 
			
		||||
				ret = false;
 | 
			
		||||
				page_vma_mapped_walk_done(&pvmw);
 | 
			
		||||
| 
						 | 
				
			
			@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			}
 | 
			
		||||
 | 
			
		||||
			if (swap_duplicate(entry) < 0) {
 | 
			
		||||
				set_pte_at(mm, address, pvmw.pte, pteval);
 | 
			
		||||
				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
 | 
			
		||||
				ret = false;
 | 
			
		||||
				page_vma_mapped_walk_done(&pvmw);
 | 
			
		||||
				break;
 | 
			
		||||
| 
						 | 
				
			
			@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 | 
			
		|||
			swp_pte = swp_entry_to_pte(entry);
 | 
			
		||||
			if (pte_soft_dirty(pteval))
 | 
			
		||||
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
 | 
			
		||||
			set_pte_at(mm, address, pvmw.pte, swp_pte);
 | 
			
		||||
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 | 
			
		||||
		} else
 | 
			
		||||
			dec_mm_counter(mm, mm_counter_file(page));
 | 
			
		||||
discard:
 | 
			
		||||
		page_remove_rmap(subpage, PageHuge(page));
 | 
			
		||||
		put_page(page);
 | 
			
		||||
		mmu_notifier_invalidate_page(mm, address);
 | 
			
		||||
		invalidation_needed = true;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (invalidation_needed)
 | 
			
		||||
		mmu_notifier_invalidate_range(mm, address,
 | 
			
		||||
				address + (1UL << compound_order(page)));
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
mm/shmem.c (12 changed lines)
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 			 */
 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
 				spin_lock(&sbinfo->shrinklist_lock);
-				if (list_empty(&info->shrinklist)) {
+				/*
+				 * _careful to defend against unlocked access to
+				 * ->shrink_list in shmem_unused_huge_shrink()
+				 */
+				if (list_empty_careful(&info->shrinklist)) {
 					list_add_tail(&info->shrinklist,
 							&sbinfo->shrinklist);
 					sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 			 * to shrink under memory pressure.
 			 */
 			spin_lock(&sbinfo->shrinklist_lock);
-			if (list_empty(&info->shrinklist)) {
+			/*
+			 * _careful to defend against unlocked access to
+			 * ->shrink_list in shmem_unused_huge_shrink()
+			 */
+			if (list_empty_careful(&info->shrinklist)) {
 				list_add_tail(&info->shrinklist,
 						&sbinfo->shrinklist);
 				sbinfo->shrinklist_len++;
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		 * which are reclaimable, under pressure.  The dentry
 		 * cache and most inode caches should fall into this
 		 */
-		free += global_page_state(NR_SLAB_RECLAIMABLE);
+		free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
 		/*
 		 * Leave reserved pages. The pages are not for anonymous pages.
@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
 	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
 #endif
 
+	/* Some igmp sysctl, whose values are always used */
+	net->ipv4.sysctl_igmp_max_memberships = 20;
+	net->ipv4.sysctl_igmp_max_msf = 10;
+	/* IGMP reports for link-local multicast groups are enabled by default */
+	net->ipv4.sysctl_igmp_llm_reports = 1;
+	net->ipv4.sysctl_igmp_qrv = 2;
+
 	return 0;
 }
 
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
 		goto out_sock;
 	}
 
-	/* Sysctl initialization */
-	net->ipv4.sysctl_igmp_max_memberships = 20;
-	net->ipv4.sysctl_igmp_max_msf = 10;
-	/* IGMP reports for link-local multicast groups are enabled by default */
-	net->ipv4.sysctl_igmp_llm_reports = 1;
-	net->ipv4.sysctl_igmp_qrv = 2;
 	return 0;
 
 out_sock:
@@ -965,11 +965,12 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
@@ -1288,6 +1289,7 @@ ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		return -EINVAL;
 
 	if ((size + skb->len > mtu) &&
+	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 	if (is_udplite)  				 /*     UDP-Lite      */
 		csum = udplite_csum(skb);
 
-	else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;
@@ -1381,11 +1381,12 @@ static int __ip6_append_data(struct sock *sk,
 	 */
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : headersize)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
@@ -3700,14 +3700,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		if (val > INT_MAX)
 			return -EINVAL;
-		po->tp_reserve = val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_reserve = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_LOSS:
 	{
@@ -49,9 +49,9 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
 		return PTR_ERR(target);
 
 	t->u.kernel.target = target;
+	memset(&par, 0, sizeof(par));
 	par.net       = net;
 	par.table     = table;
-	par.entryinfo = NULL;
 	par.target    = target;
 	par.targinfo  = t->data;
 	par.hook_mask = hook;
@@ -1455,10 +1455,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 	/* Initiate synch mode if applicable */
 	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
 		syncpt = iseqno + exp_pkts - 1;
-		if (!tipc_link_is_up(l)) {
-			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+		if (!tipc_link_is_up(l))
 			__tipc_node_link_up(n, bearer_id, xmitq);
-		}
 		if (n->state == SELF_UP_PEER_UP) {
 			n->sync_point = syncpt;
 			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);