Remove gfn_to_pfn_cache_invalidate_start()'s unused @may_block parameter,
which was left over from KVM's abandoned (for now) attempt to support guest
usage of gfn_to_pfn caches.
Fixes: a4bff3df51 ("KVM: pfncache: remove KVM_GUEST_USES_PFN usage")
Reported-by: Like Xu <like.xu.linux@gmail.com>
Cc: Paul Durrant <paul@xen.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240305003742.245767-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
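
For context, the change is a one-argument reduction in the helper's signature.
A sketch of the before/after declarations (the "before" line is reconstructed
from the @may_block name above, not quoted verbatim from the patch):

/* Before: @may_block was accepted but never read. */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block);

/* After: matches the declaration in kvm_mm.h below. */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end);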

64 lines · 1.8 KiB · C

/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1

/*
 * Architectures can choose whether to use an rwlock or spinlock
 * for the mmu_lock.  These macros, for use in common code
 * only, avoids using #ifdefs in places that must deal with
 * multiple architectures.
 */

#ifdef KVM_HAVE_MMU_RWLOCK
#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */

kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
		     bool *async, bool write_fault, bool *writable);

#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end)
{
}
#endif /* HAVE_KVM_PFNCACHE */

#ifdef CONFIG_KVM_PRIVATE_MEM
void kvm_gmem_init(struct module *module);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset);
void kvm_gmem_unbind(struct kvm_memory_slot *slot);
#else
static inline void kvm_gmem_init(struct module *module)
{

}

static inline int kvm_gmem_bind(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 unsigned int fd, loff_t offset)
{
	WARN_ON_ONCE(1);
	return -EIO;
}

static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	WARN_ON_ONCE(1);
}
#endif /* CONFIG_KVM_PRIVATE_MEM */

#endif /* __KVM_MM_H__ */
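
Usage note (not part of the file above): the KVM_MMU_LOCK wrappers exist so
that common code compiles unchanged whether an architecture selects the rwlock
or the spinlock flavor of mmu_lock. A minimal hypothetical caller:

/* Hypothetical example, not from the kernel tree. */
static void example_mmu_update(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);	/* write_lock() or spin_lock(), per arch */
	/* ... mutate state protected by mmu_lock ... */
	KVM_MMU_UNLOCK(kvm);	/* matching write_unlock()/spin_unlock() */
}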