forked from mirrors/linux

dma-buf: rename reservation_object to dma_resv

Be more consistent with the naming of the other DMA-buf objects.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/323401/

parent 5d344f58da
commit 52791eeec1

104 changed files with 523 additions and 550 deletions
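The rename is mechanical: struct reservation_object and its helpers become struct dma_resv, and <linux/reservation.h> moves to <linux/dma-resv.h>, with no functional change. A minimal driver-side sketch of the new names used in the hunks below (the helper and its variables are hypothetical; error handling trimmed):

#include <linux/dma-resv.h>		/* was <linux/reservation.h> */
#include <linux/dma-fence.h>

/* Hypothetical helper: publish a shared fence on a buffer's reservation object. */
static int example_attach_shared_fence(struct dma_resv *resv,	/* was struct reservation_object */
				       struct dma_fence *fence)
{
	int r;

	r = dma_resv_lock(resv, NULL);		/* was reservation_object_lock() */
	if (r)
		return r;

	r = dma_resv_reserve_shared(resv, 1);	/* was reservation_object_reserve_shared() */
	if (!r)
		dma_resv_add_shared_fence(resv, fence);	/* was reservation_object_add_shared_fence() */

	dma_resv_unlock(resv);			/* was reservation_object_unlock() */
	return r;
}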
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-	 reservation.o seqno-fence.o
+	 dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o

@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
-	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-		reservation_object_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+		dma_resv_fini(dmabuf->resv);
 
 	module_put(dmabuf->owner);
 	kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count;
@@ -214,7 +214,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		return 0;
 
 	rcu_read_lock();
-	reservation_object_fences(resv, &fence_excl, &fobj, &shared_count);
+	dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 		__poll_t pevents = EPOLLIN;
@@ -493,13 +493,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv = exp_info->resv;
+	struct dma_resv *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
 	if (!exp_info->resv)
-		alloc_size += sizeof(struct reservation_object);
+		alloc_size += sizeof(struct dma_resv);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
@@ -531,8 +531,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 
 	if (!resv) {
-		resv = (struct reservation_object *)&dmabuf[1];
-		reservation_object_init(resv);
+		resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(resv);
 	}
 	dmabuf->resv = resv;
 
@@ -896,11 +896,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct reservation_object *resv = dmabuf->resv;
+	struct dma_resv *resv = dmabuf->resv;
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+	ret = dma_resv_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
@@ -1141,8 +1141,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct reservation_object *robj;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
@@ -1175,7 +1175,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 
 		robj = buf_obj->resv;
 		rcu_read_lock();
-		reservation_object_fences(robj, &fence, &fobj, &shared_count);
+		dma_resv_fences(robj, &fence, &fobj, &shared_count);
 		rcu_read_unlock();
 
 		if (fence)
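The dma-buf.c kernel-doc above notes that userspace can query the implicitly tracked fences with poll() on the dma-buf file descriptor; roughly, EPOLLIN waits for the exclusive (write) fence and EPOLLOUT for all fences. A small userspace sketch, assuming dmabuf_fd is a hypothetical file descriptor obtained from an exporting driver:

/* Userspace sketch: block until the implicit fences needed for reading signal. */
#include <poll.h>

static int wait_for_read_access(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd     = dmabuf_fd,
		.events = POLLIN,	/* use POLLOUT to wait for all fences instead */
	};

	/* >0: fence(s) signalled, 0: timeout, <0: error (errno set) */
	return poll(&pfd, 1, timeout_ms);
}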
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct reservation_object through the
+ *   implicit fences are stored in &struct dma_resv through the
  *   &dma_buf.resv pointer.
  */
 
@@ -32,7 +32,7 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/export.h>
 
 /**
@@ -50,16 +50,15 @@ DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
 /**
- * reservation_object_list_alloc - allocate fence list
+ * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
  *
- * Allocate a new reservation_object_list and make sure to correctly initialize
+ * Allocate a new dma_resv_list and make sure to correctly initialize
  * shared_max.
  */
-static struct reservation_object_list *
-reservation_object_list_alloc(unsigned int shared_max)
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
 {
-	struct reservation_object_list *list;
+	struct dma_resv_list *list;
 
 	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
 	if (!list)
@@ -72,12 +71,12 @@ reservation_object_list_alloc(unsigned int shared_max)
 }
 
 /**
- * reservation_object_list_free - free fence list
+ * dma_resv_list_free - free fence list
  * @list: list to free
  *
- * Free a reservation_object_list and make sure to drop all references.
+ * Free a dma_resv_list and make sure to drop all references.
 */
-static void reservation_object_list_free(struct reservation_object_list *list)
+static void dma_resv_list_free(struct dma_resv_list *list)
 {
 	unsigned int i;
 
@@ -91,24 +90,24 @@ static void reservation_object_list_free(struct reservation_object_list *list)
 }
 
 /**
- * reservation_object_init - initialize a reservation object
+ * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
  */
-void reservation_object_init(struct reservation_object *obj)
+void dma_resv_init(struct dma_resv *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
-EXPORT_SYMBOL(reservation_object_init);
+EXPORT_SYMBOL(dma_resv_init);
 
 /**
- * reservation_object_fini - destroys a reservation object
+ * dma_resv_fini - destroys a reservation object
  * @obj: the reservation object
 */
-void reservation_object_fini(struct reservation_object *obj)
+void dma_resv_fini(struct dma_resv *obj)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *excl;
 
 	/*
@@ -120,32 +119,31 @@ void reservation_object_fini(struct reservation_object *obj)
 		dma_fence_put(excl);
 
 	fobj = rcu_dereference_protected(obj->fence, 1);
-	reservation_object_list_free(fobj);
+	dma_resv_list_free(fobj);
 	ww_mutex_destroy(&obj->lock);
 }
-EXPORT_SYMBOL(reservation_object_fini);
+EXPORT_SYMBOL(dma_resv_fini);
 
 /**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
  * @obj: reservation object
  * @num_fences: number of fences we want to add
  *
- * Should be called before reservation_object_add_shared_fence().  Must
+ * Should be called before dma_resv_add_shared_fence().  Must
  * be called with obj->lock held.
  *
  * RETURNS
  * Zero for success, or -errno
  */
-int reservation_object_reserve_shared(struct reservation_object *obj,
-				      unsigned int num_fences)
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 {
-	struct reservation_object_list *old, *new;
+	struct dma_resv_list *old, *new;
 	unsigned int i, j, k, max;
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	old = reservation_object_get_list(obj);
+	old = dma_resv_get_list(obj);
 
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
@@ -157,7 +155,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		max = 4;
 	}
 
-	new = reservation_object_list_alloc(max);
+	new = dma_resv_list_alloc(max);
 	if (!new)
 		return -ENOMEM;
 
@@ -171,7 +169,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		struct dma_fence *fence;
 
 		fence = rcu_dereference_protected(old->shared[i],
-						  reservation_object_held(obj));
+						  dma_resv_held(obj));
 		if (dma_fence_is_signaled(fence))
 			RCU_INIT_POINTER(new->shared[--k], fence);
 		else
@@ -197,41 +195,40 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		struct dma_fence *fence;
 
 		fence = rcu_dereference_protected(new->shared[i],
-						  reservation_object_held(obj));
+						  dma_resv_held(obj));
 		dma_fence_put(fence);
 	}
 	kfree_rcu(old, rcu);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_shared);
 
 /**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
+ * dma_resv_reserve_shared() has been called.
  */
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence)
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *old;
 	unsigned int i, count;
 
 	dma_fence_get(fence);
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	fobj = reservation_object_get_list(obj);
+	fobj = dma_resv_get_list(obj);
 	count = fobj->shared_count;
 
 	for (i = 0; i < count; ++i) {
 
 		old = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj));
+						dma_resv_held(obj));
 		if (old->context == fence->context ||
 		    dma_fence_is_signaled(old))
 			goto replace;
@@ -247,25 +244,24 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 	smp_store_mb(fobj->shared_count, count);
 	dma_fence_put(old);
 }
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
 /**
- * reservation_object_add_excl_fence - Add an exclusive fence.
+ * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
  * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct dma_fence *fence)
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = reservation_object_get_excl(obj);
-	struct reservation_object_list *old;
+	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_resv_list *old;
 	u32 i = 0;
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	old = reservation_object_get_list(obj);
+	old = dma_resv_get_list(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -282,41 +278,40 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 	/* inplace update, no shared fences */
 	while (i--)
 		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
+						dma_resv_held(obj)));
 
 	dma_fence_put(old_fence);
 }
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
-* reservation_object_copy_fences - Copy all fences from src to dst.
+* dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
-int reservation_object_copy_fences(struct reservation_object *dst,
-				   struct reservation_object *src)
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-	struct reservation_object_list *src_list, *dst_list;
+	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
 	unsigned int i, shared_count;
 
-	reservation_object_assert_held(dst);
+	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
 
 retry:
-	reservation_object_fences(src, &new, &src_list, &shared_count);
+	dma_resv_fences(src, &new, &src_list, &shared_count);
 	if (shared_count) {
 		rcu_read_unlock();
 
-		dst_list = reservation_object_list_alloc(shared_count);
+		dst_list = dma_resv_list_alloc(shared_count);
 		if (!dst_list)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		reservation_object_fences(src, &new, &src_list, &shared_count);
+		dma_resv_fences(src, &new, &src_list, &shared_count);
 		if (!src_list || shared_count > dst_list->shared_max) {
 			kfree(dst_list);
 			goto retry;
@@ -332,7 +327,7 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 				continue;
 
 			if (!dma_fence_get_rcu(fence)) {
-				reservation_object_list_free(dst_list);
+				dma_resv_list_free(dst_list);
 				goto retry;
 			}
 
@@ -348,28 +343,28 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	}
 
 	if (new && !dma_fence_get_rcu(new)) {
-		reservation_object_list_free(dst_list);
+		dma_resv_list_free(dst_list);
 		goto retry;
 	}
 	rcu_read_unlock();
 
-	src_list = reservation_object_get_list(dst);
-	old = reservation_object_get_excl(dst);
+	src_list = dma_resv_get_list(dst);
+	old = dma_resv_get_excl(dst);
 
 	preempt_disable();
 	rcu_assign_pointer(dst->fence_excl, new);
 	rcu_assign_pointer(dst->fence, dst_list);
 	preempt_enable();
 
-	reservation_object_list_free(src_list);
+	dma_resv_list_free(src_list);
 	dma_fence_put(old);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_copy_fences);
+EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
  * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
@@ -381,10 +376,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct dma_fence **pfence_excl,
-				      unsigned *pshared_count,
-				      struct dma_fence ***pshared)
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+			    struct dma_fence **pfence_excl,
+			    unsigned *pshared_count,
+			    struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -392,14 +387,14 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 	int ret = 1;
 
 	do {
-		struct reservation_object_list *fobj;
+		struct dma_resv_list *fobj;
 		unsigned int i;
 		size_t sz = 0;
 
 		i = 0;
 
 		rcu_read_lock();
-		reservation_object_fences(obj, &fence_excl, &fobj,
+		dma_resv_fences(obj, &fence_excl, &fobj,
 					  &shared_count);
 
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
@@ -465,10 +460,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
 
 /**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout_rcu - Wait on reservation's objects
  * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -479,11 +474,11 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zer on success.
 */
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-					 bool wait_all, bool intr,
-					 unsigned long timeout)
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+			       bool wait_all, bool intr,
+			       unsigned long timeout)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned shared_count;
 	long ret = timeout ? timeout : 1;
@@ -493,7 +488,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 	rcu_read_lock();
 	i = -1;
 
-	reservation_object_fences(obj, &fence, &fobj, &shared_count);
+	dma_resv_fences(obj, &fence, &fobj, &shared_count);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -541,11 +536,10 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
 
 
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 {
 	struct dma_fence *fence, *lfence = passed_fence;
 	int ret = 1;
@@ -562,7 +556,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 }
 
 /**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
  * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
@@ -571,10 +565,9 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 * RETURNS
 * true if all fences signaled, else false
 */
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-					  bool test_all)
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	unsigned shared_count;
 	int ret;
@@ -583,14 +576,14 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 retry:
 	ret = true;
 
-	reservation_object_fences(obj, &fence_excl, &fobj, &shared_count);
+	dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
 	if (test_all) {
 		unsigned i;
 
 		for (i = 0; i < shared_count; ++i) {
 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			ret = reservation_object_test_signaled_single(fence);
+			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
 			else if (!ret)
@@ -599,7 +592,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 	}
 
 	if (!shared_count && fence_excl) {
-		ret = reservation_object_test_signaled_single(fence_excl);
+		ret = dma_resv_test_signaled_single(fence_excl);
 		if (ret < 0)
 			goto retry;
 	}
@@ -607,4 +600,4 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
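Many of the driver hunks that follow walk the shared-fence list with the reservation lock held, pairing dma_resv_get_list() with rcu_dereference_protected(..., dma_resv_held(obj)), exactly as dma-resv.c itself does above. A minimal sketch of that pattern (the helper is hypothetical; the caller must already hold obj->lock):

/* Hypothetical helper: count the not-yet-signalled shared fences of obj. */
static unsigned int example_count_busy_shared(struct dma_resv *obj)
{
	struct dma_resv_list *list;
	unsigned int i, busy = 0;

	dma_resv_assert_held(obj);

	list = dma_resv_get_list(obj);
	if (!list)
		return 0;

	for (i = 0; i < list->shared_count; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(list->shared[i],
						  dma_resv_held(obj));
		if (!dma_fence_is_signaled(fence))
			busy++;
	}

	return busy;
}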
@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
-	struct reservation_object_list *old, *new;
+	struct dma_resv *resv = bo->tbo.base.resv;
+	struct dma_resv_list *old, *new;
 	unsigned int i, j, k;
 
 	if (!ef)
 		return -EINVAL;
 
-	old = reservation_object_get_list(resv);
+	old = dma_resv_get_list(resv);
 	if (!old)
 		return 0;
 
@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(old->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 
 		if (f->context == ef->base.context)
 			RCU_INIT_POINTER(new->shared[--j], f);
@@ -258,7 +258,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(new->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		dma_fence_put(f);
 	}
 	kfree_rcu(old, rcu);
@@ -882,7 +882,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.base.bo,
@@ -2127,7 +2127,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-		struct reservation_object *resv = bo->tbo.base.resv;
+		struct dma_resv *resv = bo->tbo.base.resv;
 
 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 				     amdgpu_bo_explicit_sync(bo));
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 
 	/* Double check that the BO is reserved by this CS */
-	if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
 		return -EINVAL;
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
+	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
 }
 
 static int
-__reservation_object_make_exclusive(struct reservation_object *obj)
+__dma_resv_make_exclusive(struct dma_resv *obj)
 {
 	struct dma_fence **fences;
 	unsigned int count;
 	int r;
 
-	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
 		return 0;
 
-	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
 	if (r)
 		return r;
 
 	if (count == 0) {
 		/* Now that was unexpected. */
 	} else if (count == 1) {
-		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_resv_add_excl_fence(obj, fences[0]);
 		dma_fence_put(fences[0]);
 		kfree(fences);
 	} else {
@@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
 		if (!array)
 			goto err_fences_put;
 
-		reservation_object_add_excl_fence(obj, &array->base);
+		dma_resv_add_excl_fence(obj, &array->base);
 		dma_fence_put(&array->base);
 	}
 
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
 		 * fences on the reservation object into a single exclusive
 		 * fence.
 		 */
-		r = __reservation_object_make_exclusive(bo->tbo.base.resv);
+		r = __dma_resv_make_exclusive(bo->tbo.base.resv);
 		if (r)
 			goto error_unreserve;
 	}
@@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
 				 struct sg_table *sg)
 {
-	struct reservation_object *resv = attach->dmabuf->resv;
+	struct dma_resv *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_param bp;
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	bp.flags = 0;
 	bp.type = ttm_bo_type_sg;
 	bp.resv = resv;
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret)
 		goto error;
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
 		bo->prime_shared_count = 1;
 
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	return &bo->tbo.base;
 
 error:
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	return ERR_PTR(ret);
 }
 
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *bo;
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_gem_create *args = data;
 	uint64_t flags = args->in.domain_flags;
 	uint64_t size = args->in.bo_size;
-	struct reservation_object *resv = NULL;
+	struct dma_resv *resv = NULL;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
+	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
 						  timeout);
 
 	/* ret == 0 means not signaled,
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj);
 
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
  *
  * Free the pasid only after all the fences in resv are signaled.
  */
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       unsigned int pasid)
 {
 	struct dma_fence *fence, **fences;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
 	unsigned count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -156,7 +156,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	reservation_object_wait_timeout_rcu(resv, true, false,
+	dma_resv_wait_timeout_rcu(resv, true, false,
 					    MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
 
 int amdgpu_pasid_alloc(unsigned int bits);
 void amdgpu_pasid_free(unsigned int pasid);
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       unsigned int pasid);
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -544,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 fail_unreserve:
 	if (!bp->resv)
-		reservation_object_unlock(bo->tbo.base.resv);
+		dma_resv_unlock(bo->tbo.base.resv);
 	amdgpu_bo_unref(&bo);
 	return r;
 }
@@ -606,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
 	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
 		if (!bp->resv)
-			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
+			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
 							NULL));
 
 		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
 		if (!bp->resv)
-			reservation_object_unlock((*bo_ptr)->tbo.base.resv);
+			dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 
 		if (r)
 			amdgpu_bo_unref(bo_ptr);
@@ -709,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
 						MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
@@ -1087,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -1283,12 +1283,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, fence);
+		dma_resv_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, fence);
+		dma_resv_add_excl_fence(resv, fence);
 }
 
 /**
@@ -1328,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
+	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
@@ -41,7 +41,7 @@ struct amdgpu_bo_param {
 	u32				preferred_domain;
 	u64				flags;
 	enum ttm_bo_type		type;
-	struct reservation_object	*resv;
+	struct dma_resv	*resv;
 };
 
 /* bo virtual addresses in a vm */
@@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
  */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner, bool explicit_sync)
 {
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
@@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = reservation_object_get_excl(resv);
+	f = dma_resv_get_excl(resv);
 	r = amdgpu_sync_fence(adev, sync, f, false);
 
-	flist = reservation_object_get_list(resv);
+	flist = dma_resv_get_list(resv);
 	if (!flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		/* We only want to trigger KFD eviction fences on
 		 * evict or move jobs. Skip KFD fences otherwise.
 		 */
@@ -27,7 +27,7 @@
 #include <linux/hashtable.h>
 
 struct dma_fence;
-struct reservation_object;
+struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
 
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		      struct dma_fence *f, bool explicit);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner,
 		     bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f)
 {
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1470,7 +1470,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 {
 	unsigned long num_pages = bo->mem.num_pages;
 	struct drm_mm_node *node = bo->mem.mm_node;
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	int i;
 
@@ -1478,18 +1478,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * cleanly handle page faults.
 	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !reservation_object_test_signaled_rcu(bo->base.resv, true))
+	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = reservation_object_get_list(bo->base.resv);
+	flist = dma_resv_get_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
-				reservation_object_held(bo->base.resv));
+				dma_resv_held(bo->base.resv));
 			if (amdkfd_fence_check_mm(f, current->mm))
 				return false;
 		}
@@ -1992,7 +1992,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush)
 {
@@ -2066,7 +2066,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -83,18 +83,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			uint32_t src_data,
-			struct reservation_object *resv,
+			struct dma_resv *resv,
 			struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo->tbo.base.resv);
+		exclusive = dma_resv_get_excl(bo->tbo.base.resv);
 	}
 
 	if (bo) {
@@ -1879,18 +1879,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
+	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, &excl,
+	r = dma_resv_get_fences_rcu(resv, &excl,
 					      &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		reservation_object_wait_timeout_rcu(resv, true, false,
+		dma_resv_wait_timeout_rcu(resv, true, false,
 						    MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -1978,7 +1978,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va *bo_va, *tmp;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	bool clear;
 	int r;
 
@@ -1997,7 +1997,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		spin_unlock(&vm->invalidated_lock);
 
 		/* Try to reserve the BO to avoid clearing its ptes */
-		if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
 			clear = false;
 		/* Somebody else is using the BO right now */
 		else
@@ -2008,7 +2008,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			return r;
 
 		if (!clear)
-			reservation_object_unlock(resv);
+			dma_resv_unlock(resv);
 		spin_lock(&vm->invalidated_lock);
 	}
 	spin_unlock(&vm->invalidated_lock);
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 			struct amdgpu_bo *bo;
 
 			bo = mapping->bo_va->base.bo;
-			if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
 			    ticket)
 				continue;
 		}
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
 						   true, true, timeout);
 }
 
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
 
-	r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
@@ -5693,7 +5693,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
+		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
 							false,
 							msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
			@ -1037,7 +1037,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 | 
			
		|||
 * As a contrast, with implicit fencing the kernel keeps track of any
 | 
			
		||||
 * ongoing rendering, and automatically ensures that the atomic update waits
 | 
			
		||||
 * for any pending rendering to complete. For shared buffers represented with
 | 
			
		||||
 * a &struct dma_buf this is tracked in &struct reservation_object.
 | 
			
		||||
 * a &struct dma_buf this is tracked in &struct dma_resv.
 | 
			
		||||
 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
 | 
			
		||||
 * whereas explicit fencing is what Android wants.
 | 
			
		||||
 *
 | 
			
		||||
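
As an aside for readers of the renamed API (not part of the patch): implicit fencing works because every producer publishes its rendering fence through the buffer's dma_resv, which the atomic commit then waits on. A minimal sketch under the new names; my_driver_publish_fence() is a hypothetical helper.

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/* Hypothetical example only: attach a rendering fence to a GEM object's
 * reservation object so later consumers can wait on it implicitly. */
static int my_driver_publish_fence(struct drm_gem_object *obj,
				   struct dma_fence *fence, bool is_write)
{
	int ret = 0;

	dma_resv_lock(obj->resv, NULL);
	if (is_write) {
		/* writers install the exclusive fence */
		dma_resv_add_excl_fence(obj->resv, fence);
	} else {
		/* readers must reserve a slot before adding a shared fence */
		ret = dma_resv_reserve_shared(obj->resv, 1);
		if (!ret)
			dma_resv_add_shared_fence(obj->resv, fence);
	}
	dma_resv_unlock(obj->resv);

	return ret;
}

The same writer/reader split (exclusive vs. shared fence) is what the etnaviv, lima and msm hunks further down in this patch implement.
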
@@ -159,7 +159,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
 	kref_init(&obj->refcount);
 	obj->handle_count = 0;
 	obj->size = size;
-	reservation_object_init(&obj->_resv);
+	dma_resv_init(&obj->_resv);
 	if (!obj->resv)
 		obj->resv = &obj->_resv;
 
@@ -755,7 +755,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
- * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects
+ * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
  * shared and/or exclusive fences.
  * @filep: DRM file private date
  * @handle: userspace handle
@@ -767,7 +767,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than 0 on success.
  */
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 				    bool wait_all, unsigned long timeout)
 {
 	long ret;
@@ -779,7 +779,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
 						  true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
@@ -790,7 +790,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
 
 /**
  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
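
For illustration (not part of the patch): a driver wait ioctl built on the renamed helper, loosely modeled on the lima and panfrost hunks later in this patch. my_driver_wait_bo() and its timeout parameter are made up for the example.

#include <linux/jiffies.h>
#include <drm/drm_gem.h>

/* Hypothetical example only: wait for all fences on a GEM handle. */
static int my_driver_wait_bo(struct drm_file *file, u32 handle,
			     bool wait_all, unsigned int timeout_ms)
{
	long ret;

	ret = drm_gem_dma_resv_wait(file, handle, wait_all,
				    msecs_to_jiffies(timeout_ms));
	if (ret <= 0)
		/* error, interruption or timeout */
		return ret ? (int)ret : -ETIMEDOUT;

	return 0;	/* all requested fences signalled */
}
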
@@ -956,7 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
 	if (obj->filp)
 		fput(obj->filp);
 
-	reservation_object_fini(&obj->_resv);
+	dma_resv_fini(&obj->_resv);
 	drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
@@ -1291,7 +1291,7 @@ drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
 	if (contended != -1) {
 		struct drm_gem_object *obj = objs[contended];
 
-		ret = reservation_object_lock_slow_interruptible(obj->resv,
+		ret = dma_resv_lock_slow_interruptible(obj->resv,
 								 acquire_ctx);
 		if (ret) {
 			ww_acquire_done(acquire_ctx);
@@ -1303,16 +1303,16 @@ drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
 		if (i == contended)
 			continue;
 
-		ret = reservation_object_lock_interruptible(objs[i]->resv,
+		ret = dma_resv_lock_interruptible(objs[i]->resv,
 							    acquire_ctx);
 		if (ret) {
 			int j;
 
 			for (j = 0; j < i; j++)
-				reservation_object_unlock(objs[j]->resv);
+				dma_resv_unlock(objs[j]->resv);
 
 			if (contended != -1 && contended >= i)
-				reservation_object_unlock(objs[contended]->resv);
+				dma_resv_unlock(objs[contended]->resv);
 
 			if (ret == -EDEADLK) {
 				contended = i;
@@ -1337,7 +1337,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
 	int i;
 
 	for (i = 0; i < count; i++)
-		reservation_object_unlock(objs[i]->resv);
+		dma_resv_unlock(objs[i]->resv);
 
 	ww_acquire_fini(acquire_ctx);
 }
@@ -1413,12 +1413,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
 
 	if (!write) {
 		struct dma_fence *fence =
-			reservation_object_get_excl_rcu(obj->resv);
+			dma_resv_get_excl_rcu(obj->resv);
 
 		return drm_gem_fence_array_add(fence_array, fence);
 	}
 
-	ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
 						&fence_count, &fences);
 	if (ret || !fence_count)
 		return ret;
@@ -7,7 +7,7 @@
 
 #include <linux/dma-buf.h>
 #include <linux/dma-fence.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/slab.h>
 
 #include <drm/drm_atomic.h>
@@ -294,7 +294,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	fence = reservation_object_get_excl_rcu(obj->resv);
+	fence = dma_resv_get_excl_rcu(obj->resv);
 	drm_atomic_set_fence_for_plane(state, fence);
 
 	return 0;
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!reservation_object_test_signaled_rcu(obj->resv,
+		if (!dma_resv_test_signaled_rcu(obj->resv,
 							  write))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = reservation_object_wait_timeout_rcu(obj->resv,
+		ret = dma_resv_wait_timeout_rcu(obj->resv,
 							  write, true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,8 +459,8 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-	struct reservation_object *robj = obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned long off = drm_vma_node_start(&obj->vma_node);
 
@@ -6,7 +6,7 @@
 #ifndef __ETNAVIV_GEM_H__
 #define __ETNAVIV_GEM_H__
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 
@@ -4,7 +4,7 @@
  */
 
 #include <linux/dma-fence-array.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
@@ -165,10 +165,10 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
-		struct reservation_object *robj = bo->obj->base.resv;
+		struct dma_resv *robj = bo->obj->base.resv;
 
 		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-			ret = reservation_object_reserve_shared(robj, 1);
+			ret = dma_resv_reserve_shared(robj, 1);
 			if (ret)
 				return ret;
 		}
@@ -177,13 +177,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
 								&bo->nr_shared,
 								&bo->shared);
 			if (ret)
 				return ret;
 		} else {
-			bo->excl = reservation_object_get_excl_rcu(robj);
+			bo->excl = dma_resv_get_excl_rcu(robj);
 		}
 
 	}
@@ -199,10 +199,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
 		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
-			reservation_object_add_excl_fence(obj->resv,
+			dma_resv_add_excl_fence(obj->resv,
 							  submit->out_fence);
 		else
-			reservation_object_add_shared_fence(obj->resv,
+			dma_resv_add_shared_fence(obj->resv,
 							    submit->out_fence);
 
 		submit_unlock_object(submit, i);
@@ -29,7 +29,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/slab.h>
 #include <linux/vgaarb.h>
 
@@ -14317,7 +14317,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		if (ret < 0)
 			return ret;
 
-		fence = reservation_object_get_excl_rcu(obj->base.resv);
+		fence = dma_resv_get_excl_rcu(obj->base.resv);
 		if (fence) {
 			add_rps_boost_after_vblank(new_state->crtc, fence);
 			dma_fence_put(fence);
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_busy *args = data;
 	struct drm_i915_gem_object *obj;
-	struct reservation_object_list *list;
+	struct dma_resv_list *list;
 	unsigned int i, shared_count;
 	struct dma_fence *excl;
 	int err;
@@ -106,11 +106,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
+	 *		!dma_resv_test_signaled_rcu(obj->resv, true);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
-	reservation_object_fences(obj->base.resv, &excl, &list, &shared_count);
+	dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
 
 	/* Translate the exclusive fence to the READ *and* WRITE engine */
 	args->busy = busy_check_writer(excl);
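
For illustration (not part of the patch): the simpler "overall busyness" check the comment above alludes to reduces to a single call under the new name. my_bo_is_idle() is a made-up wrapper.

#include <linux/dma-resv.h>

/* Hypothetical example only: true if every fence on the object signalled. */
static bool my_bo_is_idle(struct dma_resv *resv)
{
	/* test_all = true also checks the shared (read) fences,
	 * not just the exclusive (write) fence */
	return dma_resv_test_signaled_rcu(resv, true);
}
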
@@ -147,7 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 						true, I915_FENCE_TIMEOUT,
 						I915_FENCE_GFP);
 
-		reservation_object_add_excl_fence(obj->base.resv,
+		dma_resv_add_excl_fence(obj->base.resv,
 						  &clflush->dma);
 
 		i915_sw_fence_commit(&clflush->wait);
@@ -288,7 +288,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 	if (err < 0) {
 		dma_fence_set_error(&work->dma, err);
 	} else {
-		reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+		dma_resv_add_excl_fence(obj->base.resv, &work->dma);
 		err = 0;
 	}
 	i915_gem_object_unlock(obj);
@@ -6,7 +6,7 @@
 
 #include <linux/dma-buf.h>
 #include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
@@ -5,7 +5,7 @@
  */
 
 #include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
@@ -1246,7 +1246,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
+	GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
 	err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
@@ -1317,7 +1317,7 @@ relocate_entry(struct i915_vma *vma,
 
 	if (!eb->reloc_cache.vaddr &&
 	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
+	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
 		const unsigned int gen = eb->reloc_cache.gen;
 		unsigned int len;
 		u32 *batch;
@@ -78,7 +78,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
 					    I915_FENCE_GFP) < 0)
 		goto err;
 
-	reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+	dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
 
 	return &stub->dma;
 
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 	__drm_gem_object_put(&obj->base);
 }
 
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
 
 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 {
-	reservation_object_lock(obj->base.resv, NULL);
+	dma_resv_lock(obj->base.resv, NULL);
 }
 
 static inline int
 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
 {
-	return reservation_object_lock_interruptible(obj->base.resv, NULL);
+	return dma_resv_lock_interruptible(obj->base.resv, NULL);
 }
 
 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 {
-	reservation_object_unlock(obj->base.resv);
+	dma_resv_unlock(obj->base.resv);
 }
 
 struct dma_fence *
@@ -373,7 +373,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	struct dma_fence *fence;
 
 	rcu_read_lock();
-	fence = reservation_object_get_excl_rcu(obj->base.resv);
+	fence = dma_resv_get_excl_rcu(obj->base.resv);
 	rcu_read_unlock();
 
 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
@@ -31,7 +31,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 }
 
 static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
 				 unsigned int flags,
 				 long timeout)
 {
@@ -43,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		unsigned int count, i;
 		int ret;
 
-		ret = reservation_object_get_fences_rcu(resv,
+		ret = dma_resv_get_fences_rcu(resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -72,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		 */
 		prune_fences = count && timeout >= 0;
 	} else {
-		excl = reservation_object_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_rcu(resv);
 	}
 
 	if (excl && timeout >= 0)
@@ -84,10 +84,10 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences && reservation_object_trylock(resv)) {
-		if (reservation_object_test_signaled_rcu(resv, true))
-			reservation_object_add_excl_fence(resv, NULL);
-		reservation_object_unlock(resv);
+	if (prune_fences && dma_resv_trylock(resv)) {
+		if (dma_resv_test_signaled_rcu(resv, true))
+			dma_resv_add_excl_fence(resv, NULL);
+		dma_resv_unlock(resv);
 	}
 
 	return timeout;
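
For illustration (not part of the patch): the opportunistic prune above, spelled out as a standalone sketch under the new names; the same pattern reappears in __i915_vma_retire() further down. my_prune_fences() is a hypothetical helper.

#include <linux/dma-resv.h>

/* Hypothetical example only: drop all fences once they have signalled. */
static void my_prune_fences(struct dma_resv *resv)
{
	/* Purely an optimisation, so it is fine to skip if someone
	 * else currently holds the reservation lock. */
	if (!dma_resv_trylock(resv))
		return;

	/* Installing a NULL exclusive fence clears the fence lists,
	 * which is safe once everything has signalled. */
	if (dma_resv_test_signaled_rcu(resv, true))
		dma_resv_add_excl_fence(resv, NULL);

	dma_resv_unlock(resv);
}
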
@@ -140,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;
 
-		ret = reservation_object_get_fences_rcu(obj->base.resv,
+		ret = dma_resv_get_fences_rcu(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -152,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_rcu(obj->base.resv);
 	}
 
 	if (excl) {
@@ -43,7 +43,7 @@
 #include <linux/mm_types.h>
 #include <linux/perf_event.h>
 #include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/stackdepot.h>
 
@@ -29,7 +29,7 @@
 #include <drm/i915_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
@@ -96,9 +96,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	list_for_each_entry(obj, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
 		if (i915_gem_object_is_active(obj)) {
-			struct reservation_object *resv = obj->base.resv;
+			struct dma_resv *resv = obj->base.resv;
 
-			if (!reservation_object_test_signaled_rcu(resv, true))
+			if (!dma_resv_test_signaled_rcu(resv, true))
 				break;
 
 			i915_retire_requests(pool->engine->i915);
@@ -113,13 +113,13 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 			 * than replace the existing fence.
 			 */
 			if (rcu_access_pointer(resv->fence)) {
-				reservation_object_lock(resv, NULL);
-				reservation_object_add_excl_fence(resv, NULL);
-				reservation_object_unlock(resv);
+				dma_resv_lock(resv, NULL);
+				dma_resv_add_excl_fence(resv, NULL);
+				dma_resv_unlock(resv);
 			}
 		}
 
-		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
+		GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv,
 								 true));
 
 		if (obj->base.size >= size)
@@ -1027,7 +1027,7 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = reservation_object_get_fences_rcu(obj->base.resv,
+		ret = dma_resv_get_fences_rcu(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -1044,7 +1044,7 @@ i915_request_await_object(struct i915_request *to,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(obj->base.resv);
+		excl = dma_resv_get_excl_rcu(obj->base.resv);
 	}
 
 	if (excl) {
@@ -7,7 +7,7 @@
 #include <linux/slab.h>
 #include <linux/dma-fence.h>
 #include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "i915_sw_fence.h"
 #include "i915_selftest.h"
@@ -510,7 +510,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 }
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
-				    struct reservation_object *resv,
+				    struct dma_resv *resv,
 				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
@@ -526,7 +526,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 		struct dma_fence **shared;
 		unsigned int count, i;
 
-		ret = reservation_object_get_fences_rcu(resv,
+		ret = dma_resv_get_fences_rcu(resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -551,7 +551,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(resv);
+		excl = dma_resv_get_excl_rcu(resv);
 	}
 
 	if (ret >= 0 && excl && excl->ops != exclude) {
@@ -16,7 +16,7 @@
 #include <linux/wait.h>
 
 struct completion;
-struct reservation_object;
+struct dma_resv;
 
 struct i915_sw_fence {
 	wait_queue_head_t wait;
@@ -82,7 +82,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 				  gfp_t gfp);
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
-				    struct reservation_object *resv,
+				    struct dma_resv *resv,
 				    const struct dma_fence_ops *exclude,
 				    bool write,
 				    unsigned long timeout,
@@ -99,10 +99,10 @@ static void __i915_vma_retire(struct i915_active *ref)
 		return;
 
 	/* Prune the shared fence arrays iff completely idle (inc. external) */
-	if (reservation_object_trylock(obj->base.resv)) {
-		if (reservation_object_test_signaled_rcu(obj->base.resv, true))
-			reservation_object_add_excl_fence(obj->base.resv, NULL);
-		reservation_object_unlock(obj->base.resv);
+	if (dma_resv_trylock(obj->base.resv)) {
+		if (dma_resv_test_signaled_rcu(obj->base.resv, true))
+			dma_resv_add_excl_fence(obj->base.resv, NULL);
+		dma_resv_unlock(obj->base.resv);
 	}
 
 	/*
@@ -903,7 +903,7 @@ static void export_fence(struct i915_vma *vma,
 			 struct i915_request *rq,
 			 unsigned int flags)
 {
-	struct reservation_object *resv = vma->resv;
+	struct dma_resv *resv = vma->resv;
 
 	/*
 	 * Ignore errors from failing to allocate the new fence, we can't
@@ -911,9 +911,9 @@ static void export_fence(struct i915_vma *vma,
 	 * synchronisation leading to rendering corruption.
 	 */
 	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv, 1) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
+		dma_resv_add_excl_fence(resv, &rq->fence);
+	else if (dma_resv_reserve_shared(resv, 1) == 0)
+		dma_resv_add_shared_fence(resv, &rq->fence);
 }
 
 int i915_vma_move_to_active(struct i915_vma *vma,
@@ -55,7 +55,7 @@ struct i915_vma {
 	struct i915_address_space *vm;
 	const struct i915_vma_ops *ops;
 	struct i915_fence_reg *fence;
-	struct reservation_object *resv; /** Alias of obj->resv */
+	struct dma_resv *resv; /** Alias of obj->resv */
 	struct sg_table *pages;
 	void __iomem *iomap;
 	void *private; /* owned by creator */
@@ -299,16 +299,16 @@ void i915_vma_close(struct i915_vma *vma);
 void i915_vma_reopen(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
 
 static inline void i915_vma_lock(struct i915_vma *vma)
 {
-	reservation_object_lock(vma->resv, NULL);
+	dma_resv_lock(vma->resv, NULL);
 }
 
 static inline void i915_vma_unlock(struct i915_vma *vma)
 {
-	reservation_object_unlock(vma->resv);
+	dma_resv_unlock(vma->resv);
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -136,7 +136,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
 	int err = 0;
 
 	if (!write) {
-		err = reservation_object_reserve_shared(bo->gem.resv, 1);
+		err = dma_resv_reserve_shared(bo->gem.resv, 1);
 		if (err)
 			return err;
 	}
@@ -296,9 +296,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
-			reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+			dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
 		else
-			reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+			dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
 	}
 
 	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
@@ -341,7 +341,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
 
 	timeout = drm_timeout_abs_to_jiffies(timeout_ns);
 
-	ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
 	if (ret == 0)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
 
@@ -4,7 +4,7 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -663,13 +663,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = reservation_object_get_list(obj->resv);
+	fobj = dma_resv_get_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = reservation_object_get_excl(obj->resv);
+		fence = dma_resv_get_excl(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);
@@ -683,7 +683,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 
 	for (i = 0; i < fobj->shared_count; i++) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj->resv));
+						dma_resv_held(obj->resv));
 		if (fence->context != fctx->context) {
 			ret = dma_fence_wait(fence, true);
 			if (ret)
@@ -701,9 +701,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	msm_obj->gpu = gpu;
 	if (exclusive)
-		reservation_object_add_excl_fence(obj->resv, fence);
+		dma_resv_add_excl_fence(obj->resv, fence);
 	else
-		reservation_object_add_shared_fence(obj->resv, fence);
+		dma_resv_add_shared_fence(obj->resv, fence);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -728,7 +728,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
+	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -760,8 +760,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -8,7 +8,7 @@
 #define __MSM_GEM_H__
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
@@ -225,7 +225,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 			 * strange place to call it.  OTOH this is a
 			 * convenient can-fail point to hook it in.
 			 */
-			ret = reservation_object_reserve_shared(msm_obj->base.resv,
+			ret = dma_resv_reserve_shared(msm_obj->base.resv,
 								1);
 			if (ret)
 				return ret;
@@ -17,7 +17,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/spinlock.h>
 
 #include <drm/drm_atomic.h>
@@ -457,7 +457,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 		asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
-	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv);
+	asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
 	asyw->image.offset[0] = fb->nvbo->bo.offset;
 
 	if (wndw->func->prepare) {
@@ -188,7 +188,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 int
 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg, struct reservation_object *robj,
+	       struct sg_table *sg, struct dma_resv *robj,
 	       struct nouveau_bo **pnvbo)
 {
 	struct nouveau_drm *drm = cli->drm;
@@ -1324,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = reservation_object_get_excl(bo->base.resv);
+	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
 
 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -1655,12 +1655,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
-	struct reservation_object *resv = nvbo->bo.base.resv;
+	struct dma_resv *resv = nvbo->bo.base.resv;
 
 	if (exclusive)
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 	else if (fence)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
@@ -73,7 +73,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 void nouveau_bo_move_init(struct nouveau_drm *);
 int  nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
 		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
-		    struct reservation_object *robj,
+		    struct dma_resv *robj,
 		    struct nouveau_bo **);
 int  nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
 int  nouveau_bo_unpin(struct nouveau_bo *);
@@ -335,20 +335,20 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct dma_fence *fence;
-	struct reservation_object *resv = nvbo->bo.base.resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv = nvbo->bo.base.resv;
+	struct dma_resv_list *fobj;
 	struct nouveau_fence *f;
 	int ret = 0, i;
 
 	if (!exclusive) {
-		ret = reservation_object_reserve_shared(resv, 1);
+		ret = dma_resv_reserve_shared(resv, 1);
 
 		if (ret)
 			return ret;
 	}
 
-	fobj = reservation_object_get_list(resv);
-	fence = reservation_object_get_excl(resv);
+	fobj = dma_resv_get_list(resv);
+	fence = dma_resv_get_excl(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
@@ -377,7 +377,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		bool must_wait = true;
 
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(resv));
+						dma_resv_held(resv));
 
 		f = nouveau_local_fence(fence, chan->drm);
 		if (f) {
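
For illustration (not part of the patch): walking the shared fence list while the reservation lock is held, as nouveau_fence_sync() does here and msm_gem_sync_object() does above, looks like this with the new names. my_wait_shared() is a hypothetical helper.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/rcupdate.h>

/* Hypothetical example only: block until every shared fence signals. */
static int my_wait_shared(struct dma_resv *resv)
{
	struct dma_resv_list *fobj = dma_resv_get_list(resv);
	struct dma_fence *fence;
	int i, ret;

	for (i = 0; fobj && i < fobj->shared_count; i++) {
		/* dma_resv_held() documents that the caller holds the lock */
		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));
		ret = dma_fence_wait(fence, true);
		if (ret)
			return ret;
	}

	return 0;
}
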
@@ -887,7 +887,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
+	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
 						   no_wait ? 0 : 30 * HZ);
 	if (!lret)
 		ret = -EBUSY;
@@ -62,16 +62,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
-	struct reservation_object *robj = attach->dmabuf->resv;
+	struct dma_resv *robj = attach->dmabuf->resv;
 	u32 flags = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
-	reservation_object_lock(robj, NULL);
+	dma_resv_lock(robj, NULL);
 	ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
 			     sg, robj, &nvbo);
-	reservation_object_unlock(robj);
+	dma_resv_unlock(robj);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -274,7 +274,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
 						  true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -6,7 +6,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/panfrost_drm.h>
 
@@ -199,7 +199,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -209,7 +209,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		reservation_object_add_excl_fence(bos[i]->resv, fence);
+		dma_resv_add_excl_fence(bos[i]->resv, fence);
 }
 
 int panfrost_job_push(struct panfrost_job *job)
@@ -57,7 +57,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 	struct qxl_bo *bo;
 
 	list_for_each_entry(bo, &qdev->gem.objects, list) {
-		struct reservation_object_list *fobj;
+		struct dma_resv_list *fobj;
 		int rel;
 
 		rcu_read_lock();
@@ -238,7 +238,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
 			return ret;
 	}
 
-	ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
 	if (ret)
 		return ret;
 
@@ -458,9 +458,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
 
-		reservation_object_add_shared_fence(bo->base.resv, &release->base);
+		dma_resv_add_shared_fence(bo->base.resv, &release->base);
 		ttm_bo_add_to_lru(bo);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 	ww_acquire_fini(&release->ticket);
@@ -3659,7 +3659,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device *rdev)
 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 				  uint64_t src_offset, uint64_t dst_offset,
 				  unsigned num_gpu_pages,
-				  struct reservation_object *resv)
+				  struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 					uint64_t src_offset,
 					uint64_t dst_offset,
 					unsigned num_gpu_pages,
-					struct reservation_object *resv)
+					struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
 				    uint64_t src_offset,
 				    uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	struct radeon_fence *fence;
@@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset,
 				   uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv)
+				   struct dma_resv *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	struct radeon_fence *fence;
@@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 				     uint64_t src_offset, uint64_t dst_offset,
 				     unsigned num_gpu_pages,
-				     struct reservation_object *resv)
+				     struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset, uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv)
+				   struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -619,7 +619,7 @@ void radeon_sync_fence(struct radeon_sync *sync,
 		       struct radeon_fence *fence);
 int radeon_sync_resv(struct radeon_device *rdev,
 		     struct radeon_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     bool shared);
 int radeon_sync_rings(struct radeon_device *rdev,
 		      struct radeon_sync *sync,
			@ -1912,20 +1912,20 @@ struct radeon_asic {
 | 
			
		|||
					     uint64_t src_offset,
 | 
			
		||||
					     uint64_t dst_offset,
 | 
			
		||||
					     unsigned num_gpu_pages,
 | 
			
		||||
					     struct reservation_object *resv);
 | 
			
		||||
					     struct dma_resv *resv);
 | 
			
		||||
		u32 blit_ring_index;
 | 
			
 		struct radeon_fence *(*dma)(struct radeon_device *rdev,
 					    uint64_t src_offset,
 					    uint64_t dst_offset,
 					    unsigned num_gpu_pages,
-					    struct reservation_object *resv);
+					    struct dma_resv *resv);
 		u32 dma_ring_index;
 		/* method used for bo copy */
 		struct radeon_fence *(*copy)(struct radeon_device *rdev,
 					     uint64_t src_offset,
 					     uint64_t dst_offset,
 					     unsigned num_gpu_pages,
-					     struct reservation_object *resv);
+					     struct dma_resv *resv);
 		/* ring used for bo copies */
 		u32 copy_ring_index;
 	} copy;

@@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
 				    uint64_t src_offset,
 				    uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv);
+				    struct dma_resv *resv);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
@@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset,
 				   uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv);
+				   struct dma_resv *resv);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
@@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 				     uint64_t src_offset, uint64_t dst_offset,
 				     unsigned num_gpu_pages,
-				     struct reservation_object *resv);
+				     struct dma_resv *resv);
 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
 				   uint64_t src_offset, uint64_t dst_offset,
 				   unsigned num_gpu_pages,
-				   struct reservation_object *resv);
+				   struct dma_resv *resv);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device *rdev);
 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv);
+				    struct dma_resv *resv);
 u32 rv770_get_xclk(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int rv770_get_temp(struct radeon_device *rdev);
@@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
 struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
 					uint64_t src_offset, uint64_t dst_offset,
 					unsigned num_gpu_pages,
-					struct reservation_object *resv);
+					struct dma_resv *resv);
 int evergreen_get_temp(struct radeon_device *rdev);
 int evergreen_get_allowed_info_register(struct radeon_device *rdev,
 					u32 reg, u32 *val);
@@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 				 uint64_t src_offset, uint64_t dst_offset,
 				 unsigned num_gpu_pages,
-				 struct reservation_object *resv);
+				 struct dma_resv *resv);
 
 void si_dma_vm_copy_pages(struct radeon_device *rdev,
 			  struct radeon_ib *ib,
@@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
 				  uint64_t src_offset, uint64_t dst_offset,
 				  unsigned num_gpu_pages,
-				  struct reservation_object *resv);
+				  struct dma_resv *resv);
 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv);
+				    struct dma_resv *resv);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -35,7 +35,7 @@
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
 				    uint64_t saddr, uint64_t daddr,
 				    int flag, int n,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
@@ -255,7 +255,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 	int r;
 
 	list_for_each_entry(reloc, &p->validated, tv.head) {
-		struct reservation_object *resv;
+		struct dma_resv *resv;
 
 		resv = reloc->robj->tbo.base.resv;
 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv));
+	work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
@@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
 		if (!r)
 			r = -EBUSY;
 
@@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true);
+	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
 	if (r == 0)
 		r = -EBUSY;
 	else
@@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gem_to_radeon_bo(gobj);
 
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
 	if (ret == 0)
 		r = -EBUSY;
 	else if (ret < 0)
@@ -163,7 +163,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 				continue;
 			}
 
-			r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+			r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 				true, false, MAX_SCHEDULE_TIMEOUT);
 			if (r <= 0)
 				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 int radeon_bo_create(struct radeon_device *rdev,
 		     unsigned long size, int byte_align, bool kernel,
 		     u32 domain, u32 flags, struct sg_table *sg,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 	int steal;
 	int i;
 
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!bo->tiling_flags)
 		return 0;
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 				uint32_t *tiling_flags,
 				uint32_t *pitch)
 {
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
 	if (!force_drop)
-		reservation_object_assert_held(bo->tbo.base.resv);
+		dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 }
@@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
 			    unsigned long size, int byte_align,
 			    bool kernel, u32 domain, u32 flags,
 			    struct sg_table *sg,
-			    struct reservation_object *resv,
+			    struct dma_resv *resv,
 			    struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
@@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 							struct dma_buf_attachment *attach,
 							struct sg_table *sg)
 {
-	struct reservation_object *resv = attach->dmabuf->resv;
+	struct dma_resv *resv = attach->dmabuf->resv;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_bo *bo;
 	int ret;
 
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
 			       RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_sync *sync,
  */
 int radeon_sync_resv(struct radeon_device *rdev,
 		     struct radeon_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     bool shared)
 {
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	struct radeon_fence *fence;
 	unsigned i;
 	int r = 0;
 
 	/* always sync to the exclusive fence */
-	f = reservation_object_get_excl(resv);
+	f = dma_resv_get_excl(resv);
 	fence = f ? to_radeon_fence(f) : NULL;
 	if (fence && fence->rdev == rdev)
 		radeon_sync_fence(sync, fence);
 	else if (f)
 		r = dma_fence_wait(f, true);
 
-	flist = reservation_object_get_list(resv);
+	flist = dma_resv_get_list(resv);
 	if (shared || !flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		fence = to_radeon_fence(f);
 		if (fence && fence->rdev == rdev)
 			radeon_sync_fence(sync, fence);
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	f = reservation_object_get_excl(bo->tbo.base.resv);
+	f = dma_resv_get_excl(bo->tbo.base.resv);
 	if (f) {
 		r = radeon_fence_wait((struct radeon_fence *)f, false);
 		if (r) {
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
 		int r;
 
 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
-		r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);
+		r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
 		if (r)
 			return r;
 
@@ -42,7 +42,7 @@
 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
 				    uint64_t src_offset, uint64_t dst_offset,
 				    unsigned num_gpu_pages,
-				    struct reservation_object *resv)
+				    struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
 				 uint64_t src_offset, uint64_t dst_offset,
 				 unsigned num_gpu_pages,
-				 struct reservation_object *resv)
+				 struct dma_resv *resv)
 {
 	struct radeon_fence *fence;
 	struct radeon_sync sync;
@@ -41,7 +41,7 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -161,7 +161,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	atomic_dec(&bo->bdev->glob->bo_count);
 	dma_fence_put(bo->moving);
 	if (!ttm_bo_uses_embedded_gem_object(bo))
-		reservation_object_fini(&bo->base._resv);
+		dma_resv_fini(&bo->base._resv);
 	mutex_destroy(&bo->wu_mutex);
 	bo->destroy(bo);
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
@@ -173,7 +173,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (!list_empty(&bo->lru))
 		return;
@@ -244,7 +244,7 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 			     struct ttm_lru_bulk_move *bulk)
 {
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	ttm_bo_del_from_lru(bo);
 	ttm_bo_add_to_lru(bo);
@@ -277,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		if (!pos->first)
 			continue;
 
-		reservation_object_assert_held(pos->first->base.resv);
-		reservation_object_assert_held(pos->last->base.resv);
+		dma_resv_assert_held(pos->first->base.resv);
+		dma_resv_assert_held(pos->last->base.resv);
 
 		man = &pos->first->bdev->man[TTM_PL_TT];
 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -292,8 +292,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		if (!pos->first)
 			continue;
 
-		reservation_object_assert_held(pos->first->base.resv);
-		reservation_object_assert_held(pos->last->base.resv);
+		dma_resv_assert_held(pos->first->base.resv);
+		dma_resv_assert_held(pos->last->base.resv);
 
 		man = &pos->first->bdev->man[TTM_PL_VRAM];
 		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -307,8 +307,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		if (!pos->first)
 			continue;
 
-		reservation_object_assert_held(pos->first->base.resv);
-		reservation_object_assert_held(pos->last->base.resv);
+		dma_resv_assert_held(pos->first->base.resv);
+		dma_resv_assert_held(pos->last->base.resv);
 
 		lru = &pos->first->bdev->glob->swap_lru[i];
 		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
@@ -442,29 +442,29 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		return 0;
 
-	BUG_ON(!reservation_object_trylock(&bo->base._resv));
+	BUG_ON(!dma_resv_trylock(&bo->base._resv));
 
-	r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv);
+	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
 	if (r)
-		reservation_object_unlock(&bo->base._resv);
+		dma_resv_unlock(&bo->base._resv);
 
 	return r;
 }
 
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int i;
 
-	fobj = reservation_object_get_list(&bo->base._resv);
-	fence = reservation_object_get_excl(&bo->base._resv);
+	fobj = dma_resv_get_list(&bo->base._resv);
+	fence = dma_resv_get_excl(&bo->base._resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
 
 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-					reservation_object_held(bo->base.resv));
+					dma_resv_held(bo->base.resv));
 
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
@@ -482,23 +482,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		/* Last resort, if we fail to allocate memory for the
 		 * fences block for the BO to become idle
 		 */
-		reservation_object_wait_timeout_rcu(bo->base.resv, true, false,
+		dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
 						    30 * HZ);
 		spin_lock(&glob->lru_lock);
 		goto error;
 	}
 
 	spin_lock(&glob->lru_lock);
-	ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY;
+	ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
 	if (!ret) {
-		if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) {
+		if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
 			if (bo->base.resv != &bo->base._resv)
-				reservation_object_unlock(&bo->base._resv);
+				dma_resv_unlock(&bo->base._resv);
 
 			ttm_bo_cleanup_memtype_use(bo);
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 			return;
 		}
 
@@ -514,10 +514,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_add_to_lru(bo);
 		}
 
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	if (bo->base.resv != &bo->base._resv)
-		reservation_object_unlock(&bo->base._resv);
+		dma_resv_unlock(&bo->base._resv);
 
 error:
 	kref_get(&bo->list_kref);
@@ -546,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool unlock_resv)
 {
 	struct ttm_bo_global *glob = bo->bdev->glob;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	int ret;
 
 	if (unlikely(list_empty(&bo->ddestroy)))
@@ -554,7 +554,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	else
 		resv = &bo->base._resv;
 
-	if (reservation_object_test_signaled_rcu(resv, true))
+	if (dma_resv_test_signaled_rcu(resv, true))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -563,10 +563,10 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		long lret;
 
 		if (unlock_resv)
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&glob->lru_lock);
 
-		lret = reservation_object_wait_timeout_rcu(resv, true,
+		lret = dma_resv_wait_timeout_rcu(resv, true,
 							   interruptible,
 							   30 * HZ);
 
@@ -576,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			return -EBUSY;
 
 		spin_lock(&glob->lru_lock);
-		if (unlock_resv && !reservation_object_trylock(bo->base.resv)) {
+		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
 			/*
 			 * We raced, and lost, someone else holds the reservation now,
 			 * and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -593,7 +593,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		if (unlock_resv)
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -606,7 +606,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	ttm_bo_cleanup_memtype_use(bo);
 
 	if (unlock_resv)
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 
 	return 0;
 }
@@ -634,12 +634,12 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 
 		if (remove_all || bo->base.resv != &bo->base._resv) {
 			spin_unlock(&glob->lru_lock);
-			reservation_object_lock(bo->base.resv, NULL);
+			dma_resv_lock(bo->base.resv, NULL);
 
 			spin_lock(&glob->lru_lock);
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 
-		} else if (reservation_object_trylock(bo->base.resv)) {
+		} else if (dma_resv_trylock(bo->base.resv)) {
 			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 		} else {
 			spin_unlock(&glob->lru_lock);
@@ -708,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	placement.num_placement = 0;
 	placement.num_busy_placement = 0;
@@ -779,7 +779,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 	bool ret = false;
 
 	if (bo->base.resv == ctx->resv) {
-		reservation_object_assert_held(bo->base.resv);
+		dma_resv_assert_held(bo->base.resv);
 		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
 		    || !list_empty(&bo->ddestroy))
 			ret = true;
@@ -787,7 +787,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 		if (busy)
 			*busy = false;
 	} else {
-		ret = reservation_object_trylock(bo->base.resv);
+		ret = dma_resv_trylock(bo->base.resv);
 		*locked = ret;
 		if (busy)
 			*busy = !ret;
@@ -815,10 +815,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 		return -EBUSY;
 
 	if (ctx->interruptible)
-		r = reservation_object_lock_interruptible(busy_bo->base.resv,
+		r = dma_resv_lock_interruptible(busy_bo->base.resv,
 							  ticket);
 	else
-		r = reservation_object_lock(busy_bo->base.resv, ticket);
+		r = dma_resv_lock(busy_bo->base.resv, ticket);
 
 	/*
 	 * TODO: It would be better to keep the BO locked until allocation is at
@@ -826,7 +826,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 	 * of TTM.
 	 */
 	if (!r)
-		reservation_object_unlock(busy_bo->base.resv);
+		dma_resv_unlock(busy_bo->base.resv);
 
 	return r == -EDEADLK ? -EBUSY : r;
 }
@@ -852,7 +852,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
 							    &busy)) {
 				if (busy && !busy_bo && ticket !=
-				    reservation_object_locking_ctx(bo->base.resv))
+				    dma_resv_locking_ctx(bo->base.resv))
 					busy_bo = bo;
 				continue;
 			}
@@ -860,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 			if (place && !bdev->driver->eviction_valuable(bo,
 								      place)) {
 				if (locked)
-					reservation_object_unlock(bo->base.resv);
+					dma_resv_unlock(bo->base.resv);
 				continue;
 			}
 			break;
@@ -932,9 +932,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	spin_unlock(&man->move_lock);
 
 	if (fence) {
-		reservation_object_add_shared_fence(bo->base.resv, fence);
+		dma_resv_add_shared_fence(bo->base.resv, fence);
 
-		ret = reservation_object_reserve_shared(bo->base.resv, 1);
+		ret = dma_resv_reserve_shared(bo->base.resv, 1);
 		if (unlikely(ret)) {
 			dma_fence_put(fence);
 			return ret;
@@ -961,7 +961,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	struct ww_acquire_ctx *ticket;
 	int ret;
 
-	ticket = reservation_object_locking_ctx(bo->base.resv);
+	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
@@ -1091,7 +1091,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	int i, ret;
 
-	ret = reservation_object_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
 	if (unlikely(ret))
 		return ret;
 
@@ -1172,7 +1172,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1242,7 +1242,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	int ret;
 	uint32_t new_flags;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 	/*
 	 * Check whether we need to move buffer.
 	 */
@@ -1279,7 +1279,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_operation_ctx *ctx,
 			 size_t acc_size,
 			 struct sg_table *sg,
-			 struct reservation_object *resv,
+			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1333,7 +1333,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->sg = sg;
 	if (resv) {
 		bo->base.resv = resv;
-		reservation_object_assert_held(bo->base.resv);
+		dma_resv_assert_held(bo->base.resv);
 	} else {
 		bo->base.resv = &bo->base._resv;
 	}
@@ -1342,7 +1342,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		 * bo.gem is not initialized, so we have to setup the
 		 * struct elements we want use regardless.
 		 */
-		reservation_object_init(&bo->base._resv);
+		dma_resv_init(&bo->base._resv);
 		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
@@ -1360,7 +1360,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 * since otherwise lockdep will be angered in radeon.
 	 */
 	if (!resv) {
-		locked = reservation_object_trylock(bo->base.resv);
+		locked = dma_resv_trylock(bo->base.resv);
 		WARN_ON(!locked);
 	}
 
@@ -1394,7 +1394,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		bool interruptible,
 		size_t acc_size,
 		struct sg_table *sg,
-		struct reservation_object *resv,
+		struct dma_resv *resv,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	struct ttm_operation_ctx ctx = { interruptible, false };
@@ -1804,13 +1804,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (reservation_object_test_signaled_rcu(bo->base.resv, true))
+		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true,
+	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
 						      interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
@@ -1818,7 +1818,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	if (timeout == 0)
 		return -EBUSY;
 
-	reservation_object_add_excl_fence(bo->base.resv, NULL);
+	dma_resv_add_excl_fence(bo->base.resv, NULL);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
@@ -1934,7 +1934,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 	 * already swapped buffer.
 	 */
 	if (locked)
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
@@ -1972,14 +1972,14 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
 	ret = mutex_lock_interruptible(&bo->wu_mutex);
 	if (unlikely(ret != 0))
 		return -ERESTARTSYS;
-	if (!reservation_object_is_locked(bo->base.resv))
+	if (!dma_resv_is_locked(bo->base.resv))
 		goto out_unlock;
-	ret = reservation_object_lock_interruptible(bo->base.resv, NULL);
+	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
 	if (ret == -EINTR)
 		ret = -ERESTARTSYS;
 	if (unlikely(ret != 0))
 		goto out_unlock;
-	reservation_object_unlock(bo->base.resv);
+	dma_resv_unlock(bo->base.resv);
 
 out_unlock:
 	mutex_unlock(&bo->wu_mutex);
@@ -38,7 +38,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_transfer_obj {
 	struct ttm_buffer_object base;
@@ -518,8 +518,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	fbo->base.destroy = &ttm_transfered_destroy;
 	fbo->base.acc_size = 0;
 	fbo->base.base.resv = &fbo->base.base._resv;
-	reservation_object_init(fbo->base.base.resv);
-	ret = reservation_object_trylock(fbo->base.base.resv);
+	dma_resv_init(fbo->base.base.resv);
+	ret = dma_resv_trylock(fbo->base.base.resv);
 	WARN_ON(!ret);
 
 	*new_obj = &fbo->base;
@@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	int ret;
 	struct ttm_buffer_object *ghost_obj;
 
-	reservation_object_add_excl_fence(bo->base.resv, fence);
+	dma_resv_add_excl_fence(bo->base.resv, fence);
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false);
 		if (ret)
@@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
 
 		/**
 		 * If we're not moving to fixed memory, the TTM object
@@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 
 	int ret;
 
-	reservation_object_add_excl_fence(bo->base.resv, fence);
+	dma_resv_add_excl_fence(bo->base.resv, fence);
 
 	if (!evict) {
 		struct ttm_buffer_object *ghost_obj;
@@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
 
 		/**
 		 * If we're not moving to fixed memory, the TTM object
@@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 	if (ret)
 		return ret;
 
-	ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv);
+	ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
 	/* Last resort, wait for the BO to be idle when we are OOM */
 	if (ret)
 		ttm_bo_wait(bo, false, false);
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		ttm_bo_get(bo);
 		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
 		goto out_unlock;
 	}
@@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	if (unlikely(!reservation_object_trylock(bo->base.resv))) {
+	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_get(bo);
@@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 out_io_unlock:
 	ttm_mem_io_unlock(man);
 out_unlock:
-	reservation_object_unlock(bo->base.resv);
+	dma_resv_unlock(bo->base.resv);
 	return ret;
 }
 
@@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
 	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 }
 
@@ -71,7 +71,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 		if (list_empty(&bo->lru))
 			ttm_bo_add_to_lru(bo);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 
@@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
 		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			reservation_object_unlock(bo->base.resv);
+			dma_resv_unlock(bo->base.resv);
 
 			ret = -EBUSY;
 
@@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			if (!entry->num_shared)
 				continue;
 
-			ret = reservation_object_reserve_shared(bo->base.resv,
+			ret = dma_resv_reserve_shared(bo->base.resv,
 								entry->num_shared);
 			if (!ret)
 				continue;
@@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 		if (ret == -EDEADLK) {
 			if (intr) {
-				ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
 										 ticket);
 			} else {
-				reservation_object_lock_slow(bo->base.resv, ticket);
+				dma_resv_lock_slow(bo->base.resv, ticket);
 				ret = 0;
 			}
 		}
 
 		if (!ret && entry->num_shared)
-			ret = reservation_object_reserve_shared(bo->base.resv,
+			ret = dma_resv_reserve_shared(bo->base.resv,
 								entry->num_shared);
 
 		if (unlikely(ret != 0)) {
@@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
 		if (entry->num_shared)
-			reservation_object_add_shared_fence(bo->base.resv, fence);
+			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
-			reservation_object_add_excl_fence(bo->base.resv, fence);
+			dma_resv_add_excl_fence(bo->base.resv, fence);
 		if (list_empty(&bo->lru))
 			ttm_bo_add_to_lru(bo);
 		else
 			ttm_bo_move_to_lru_tail(bo, NULL);
-		reservation_object_unlock(bo->base.resv);
+		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 	if (ticket)
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 	struct ttm_bo_device *bdev = bo->bdev;
 	uint32_t page_flags = 0;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (bdev->need_dma32)
 		page_flags |= TTM_PAGE_FLAG_DMA32;
@@ -409,7 +409,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
 	if (args->pad != 0)
 		return -EINVAL;
 
-	ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
 					      true, timeout_jiffies);
 
 	/* Decrement the user's timeout, in case we got interrupted
@@ -495,7 +495,7 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
 
 	for (i = 0; i < job->bo_count; i++) {
 		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(job->bo[i]->resv,
+		dma_resv_add_excl_fence(job->bo[i]->resv,
 						  job->done_fence);
 	}
 
@@ -543,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 		bo = to_vc4_bo(&exec->bo[i]->base);
 		bo->seqno = seqno;
 
-		reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
+		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
 	}
 
 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -554,7 +554,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
 		bo->write_seqno = seqno;
 
-		reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
+		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
 	}
 }
 
@@ -642,7 +642,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 	for (i = 0; i < exec->bo_count; i++) {
 		bo = &exec->bo[i]->base;
 
-		ret = reservation_object_reserve_shared(bo->resv, 1);
+		ret = dma_resv_reserve_shared(bo->resv, 1);
 		if (ret) {
 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
 			return ret;
@@ -21,7 +21,7 @@
  */
 
 #include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_file.h>
 
@@ -128,7 +128,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 {
 	struct drm_vgem_fence_attach *arg = data;
 	struct vgem_file *vfile = file->driver_priv;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	struct drm_gem_object *obj;
 	struct dma_fence *fence;
 	int ret;
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Check for a conflicting fence */
 	resv = obj->resv;
-	if (!reservation_object_test_signaled_rcu(resv,
+	if (!dma_resv_test_signaled_rcu(resv,
 						  arg->flags & VGEM_FENCE_WRITE)) {
 		ret = -EBUSY;
 		goto err_fence;
@@ -159,12 +159,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 
 	/* Expose the fence via the dma-buf */
 	ret = 0;
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	if (arg->flags & VGEM_FENCE_WRITE)
-		reservation_object_add_excl_fence(resv, fence);
-	else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
-		reservation_object_add_shared_fence(resv, fence);
-	reservation_object_unlock(resv);
+		dma_resv_add_excl_fence(resv, fence);
+	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
+		dma_resv_add_shared_fence(resv, fence);
+	dma_resv_unlock(resv);
 
 	/* Record the fence in our idr for later signaling */
 	if (ret == 0) {
@@ -396,7 +396,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		(vgdev, qobj->hw_res_handle,
 		 vfpriv->ctx_id, offset, args->level,
 		 &box, fence);
-	reservation_object_add_excl_fence(qobj->tbo.base.resv,
+	dma_resv_add_excl_fence(qobj->tbo.base.resv,
 					  &fence->f);
 
 	dma_fence_put(&fence->f);
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			(vgdev, qobj,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
 			 args->level, &box, fence);
-		reservation_object_add_excl_fence(qobj->tbo.base.resv,
+		dma_resv_add_excl_fence(qobj->tbo.base.resv,
 						  &fence->f);
 		dma_fence_put(&fence->f);
 	}
@@ -212,7 +212,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			 0, 0, vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
-			reservation_object_add_excl_fence(bo->tbo.base.resv,
+			dma_resv_add_excl_fence(bo->tbo.base.resv,
 							  &vgfb->fence->f);
 			dma_fence_put(&vgfb->fence->f);
 			vgfb->fence = NULL;
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 
 	/* Buffer objects need to be either pinned or reserved: */
 	if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		reservation_object_assert_held(dst->base.resv);
+		dma_resv_assert_held(dst->base.resv);
 	if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		reservation_object_assert_held(src->base.resv);
+		dma_resv_assert_held(src->base.resv);
 
 	if (dst->ttm->state == tt_unpopulated) {
 		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
@@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (pin) {
 		if (vbo->pin_count++ > 0)
@@ -689,7 +689,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 		long lret;
 
-		lret = reservation_object_wait_timeout_rcu
+		lret = dma_resv_wait_timeout_rcu
 			(bo->base.resv, true, true,
 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
@@ -1007,10 +1007,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 		dma_fence_put(&fence->base);
 	} else
-		reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 }
 
 
@@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
 	} *cmd;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)
@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 		return 0;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	reservation_object_assert_held(bo->base.resv);
+	dma_resv_assert_held(bo->base.resv);
 
 	mutex_lock(&dev_priv->binding_mutex);
 	if (!vcotbl->scrubbed)
@@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
 	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
-			reservation_object_assert_held(res->backup->base.base.resv);
+			dma_resv_assert_held(res->backup->base.base.resv);
 			list_del_init(&res->mob_head);
 			vmw_bo_unreference(&res->backup);
 		}
 
 		if (new_backup) {
 			res->backup = vmw_bo_reference(new_backup);
-			reservation_object_assert_held(new_backup->base.base.resv);
+			dma_resv_assert_held(new_backup->base.base.resv);
 			list_add_tail(&res->mob_head, &new_backup->res_list);
 		} else {
 			res->backup = NULL;
@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 		.num_shared = 0
 	};
 
-	reservation_object_assert_held(vbo->base.base.resv);
+	dma_resv_assert_held(vbo->base.base.resv);
 	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
 		if (!res->func->unbind)
 			continue;
@@ -87,7 +87,7 @@ struct module;
 
 struct device_node;
 struct videomode;
-struct reservation_object;
+struct dma_resv;
 struct dma_buf_attachment;
 
 struct pci_dev;
@@ -35,7 +35,7 @@
  */
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_vma_manager.h>
 
@@ -276,7 +276,7 @@ struct drm_gem_object {
 	 *
 	 * Normally (@resv == &@_resv) except for imported GEM objects.
 	 */
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 
 	/**
 	 * @_resv:
@@ -285,7 +285,7 @@ struct drm_gem_object {
 	 *
 	 * This is unused for imported GEM objects.
 	 */
-	struct reservation_object _resv;
+	struct dma_resv _resv;
 
 	/**
 	 * @funcs:
@@ -390,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
 			   int count, struct drm_gem_object ***objs_out);
 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 				    bool wait_all, unsigned long timeout);
 int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
 			      struct ww_acquire_ctx *acquire_ctx);
@@ -40,7 +40,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/bitmap.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_bo_global;
 
@@ -273,7 +273,7 @@ struct ttm_bo_kmap_obj {
 struct ttm_operation_ctx {
 	bool interruptible;
 	bool no_wait_gpu;
-	struct reservation_object *resv;
+	struct dma_resv *resv;
 	uint64_t bytes_moved;
 	uint32_t flags;
 };
@@ -493,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * @page_alignment: Data alignment in pages.
  * @ctx: TTM operation context for memory allocation.
  * @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -526,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_operation_ctx *ctx,
 			 size_t acc_size,
 			 struct sg_table *sg,
-			 struct reservation_object *resv,
+			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *));
 
 /**
@@ -545,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -570,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
 		unsigned long size, enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment, bool interrubtible, size_t acc_size,
-		struct sg_table *sg, struct reservation_object *resv,
+		struct sg_table *sg, struct dma_resv *resv,
 		void (*destroy) (struct ttm_buffer_object *));
 
 /**
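
Note on the pattern: every hunk in this diff is a mechanical rename, with each reservation_object_*() call replaced by the matching dma_resv_*() call and the arguments left unchanged. The sketch below is not part of the commit; it only illustrates, under the new names, the fence-attach sequence that the vgem hunk above uses. The helper name example_attach_fence and the write flag are hypothetical placeholders.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hypothetical helper, for illustration only: attach @fence to @resv. */
static int example_attach_fence(struct dma_resv *resv,
				struct dma_fence *fence, bool write)
{
	int ret = 0;

	dma_resv_lock(resv, NULL);		/* was reservation_object_lock() */
	if (write) {
		/* an exclusive fence covers writes */
		dma_resv_add_excl_fence(resv, fence);
	} else {
		/* reserve a slot, then add a shared (read) fence */
		ret = dma_resv_reserve_shared(resv, 1);
		if (ret == 0)
			dma_resv_add_shared_fence(resv, fence);
	}
	dma_resv_unlock(resv);			/* was reservation_object_unlock() */

	return ret;
}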
Some files were not shown because too many files have changed in this diff.