	io_uring: lockdep annotate CQ locking
Locking around CQE posting is complex and depends on options the ring is
created with, add more thorough lockdep annotations checking all
invariants.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/aa3770b4eacae3915d782cc2ab2f395a99b4b232.1672795976.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9ffa13ff78
commit f26cc95935

2 changed files with 17 additions and 3 deletions
io_uring/io_uring.c

@@ -731,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
+	lockdep_assert_held(&ctx->completion_lock);
+
 	if (is_cqe32)
 		ocq_size += sizeof(struct io_uring_cqe);
 
@@ -820,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 {
 	struct io_uring_cqe *cqe;
 
-	if (!ctx->task_complete)
-		lockdep_assert_held(&ctx->completion_lock);
-
 	ctx->cq_extra++;
 
 	/*

io_uring/io_uring.h

@@ -79,6 +79,19 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
 
+#define io_lockdep_assert_cq_locked(ctx)				\
+	do {								\
+		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
+			lockdep_assert_held(&ctx->uring_lock);		\
+		} else if (!ctx->task_complete) {			\
+			lockdep_assert_held(&ctx->completion_lock);	\
+		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
+			lockdep_assert(current_work());			\
+		} else {						\
+			lockdep_assert(current == ctx->submitter_task);	\
+		}							\
+	} while (0)
+
 static inline void io_req_task_work_add(struct io_kiocb *req)
 {
 	__io_req_task_work_add(req, true);
@@ -92,6 +105,8 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx);
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
+	io_lockdep_assert_cq_locked(ctx);
+
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
 		struct io_uring_cqe *cqe = ctx->cqe_cached;
 
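For context, a minimal userspace sketch (not part of the commit) of the ring
setup flags that select the different CQE-posting regimes the new macro
distinguishes. It assumes liburing is installed and a kernel recent enough
for IORING_SETUP_DEFER_TASKRUN; the helper try_ring() is purely illustrative,
and the mapping of DEFER_TASKRUN rings to the ctx->task_complete case is an
inference from kernel sources of this era, not something stated in the commit
itself.

/*
 * Hedged illustration: create rings with the setup flags that determine
 * which lock io_lockdep_assert_cq_locked() expects around CQE posting.
 */
#include <liburing.h>
#include <stdio.h>

static int try_ring(const char *name, unsigned flags)
{
	struct io_uring ring;
	int ret = io_uring_queue_init(8, &ring, flags);

	if (ret < 0) {
		fprintf(stderr, "%s: io_uring_queue_init failed: %d\n", name, ret);
		return ret;
	}
	printf("%s: ring created\n", name);
	io_uring_queue_exit(&ring);
	return 0;
}

int main(void)
{
	/* Default ring: CQEs are posted under ctx->completion_lock. */
	try_ring("default", 0);

	/* IOPOLL ring: completions are reaped under ctx->uring_lock. */
	try_ring("iopoll", IORING_SETUP_IOPOLL);

	/*
	 * Single-issuer ring with deferred task_work: completions are
	 * posted by the submitter task itself (assumed to correspond to
	 * the ctx->task_complete branch of the macro).
	 */
	try_ring("defer-taskrun",
		 IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN);
	return 0;
}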