forked from mirrors/linux
		
	media: v4l: event: Prevent freeing event subscriptions while accessed
The event subscriptions are added to the subscribed event list while
holding a spinlock, but that lock is subsequently released while still
accessing the subscription object. This makes it possible to unsubscribe
the event --- freeing the subscription object's memory --- while
the subscription object is simultaneously accessed.
Prevent this by adding a mutex to serialise the event subscription and
unsubscription. This also gives a guarantee to the callback ops that the
add op has returned before the del op is called.
This change also results in making the elems field less special:
subscriptions are only added to the event list once they are fully
initialised.
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: stable@vger.kernel.org # for 4.14 and up
Fixes: c3b5b0241f ("V4L/DVB: V4L: Events: Add backend")
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
			
			
This commit is contained in:
		
							parent
							
								
									324493fba7
								
							
						
					
					
						commit
						ad608fbcf1
					
				
					 3 changed files with 26 additions and 18 deletions
				
			
		| 
						 | 
					@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
 | 
				
			||||||
	if (sev == NULL)
 | 
						if (sev == NULL)
 | 
				
			||||||
		return;
 | 
							return;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*
 | 
					 | 
				
			||||||
	 * If the event has been added to the fh->subscribed list, but its
 | 
					 | 
				
			||||||
	 * add op has not completed yet elems will be 0, treat this as
 | 
					 | 
				
			||||||
	 * not being subscribed.
 | 
					 | 
				
			||||||
	 */
 | 
					 | 
				
			||||||
	if (!sev->elems)
 | 
					 | 
				
			||||||
		return;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	/* Increase event sequence number on fh. */
 | 
						/* Increase event sequence number on fh. */
 | 
				
			||||||
	fh->sequence++;
 | 
						fh->sequence++;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 | 
				
			||||||
	struct v4l2_subscribed_event *sev, *found_ev;
 | 
						struct v4l2_subscribed_event *sev, *found_ev;
 | 
				
			||||||
	unsigned long flags;
 | 
						unsigned long flags;
 | 
				
			||||||
	unsigned i;
 | 
						unsigned i;
 | 
				
			||||||
 | 
						int ret = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (sub->type == V4L2_EVENT_ALL)
 | 
						if (sub->type == V4L2_EVENT_ALL)
 | 
				
			||||||
		return -EINVAL;
 | 
							return -EINVAL;
 | 
				
			||||||
| 
						 | 
					@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 | 
				
			||||||
	sev->flags = sub->flags;
 | 
						sev->flags = sub->flags;
 | 
				
			||||||
	sev->fh = fh;
 | 
						sev->fh = fh;
 | 
				
			||||||
	sev->ops = ops;
 | 
						sev->ops = ops;
 | 
				
			||||||
 | 
						sev->elems = elems;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						mutex_lock(&fh->subscribe_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 | 
						spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 | 
				
			||||||
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
 | 
						found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
 | 
				
			||||||
	if (!found_ev)
 | 
					 | 
				
			||||||
		list_add(&sev->list, &fh->subscribed);
 | 
					 | 
				
			||||||
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 | 
						spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (found_ev) {
 | 
						if (found_ev) {
 | 
				
			||||||
 | 
							/* Already listening */
 | 
				
			||||||
		kvfree(sev);
 | 
							kvfree(sev);
 | 
				
			||||||
		return 0; /* Already listening */
 | 
							goto out_unlock;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (sev->ops && sev->ops->add) {
 | 
						if (sev->ops && sev->ops->add) {
 | 
				
			||||||
		int ret = sev->ops->add(sev, elems);
 | 
							ret = sev->ops->add(sev, elems);
 | 
				
			||||||
		if (ret) {
 | 
							if (ret) {
 | 
				
			||||||
			sev->ops = NULL;
 | 
								kvfree(sev);
 | 
				
			||||||
			v4l2_event_unsubscribe(fh, sub);
 | 
								goto out_unlock;
 | 
				
			||||||
			return ret;
 | 
					 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Mark as ready for use */
 | 
						spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 | 
				
			||||||
	sev->elems = elems;
 | 
						list_add(&sev->list, &fh->subscribed);
 | 
				
			||||||
 | 
						spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return 0;
 | 
					out_unlock:
 | 
				
			||||||
 | 
						mutex_unlock(&fh->subscribe_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return ret;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 | 
					EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 | 
				
			||||||
		return 0;
 | 
							return 0;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						mutex_lock(&fh->subscribe_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 | 
						spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
 | 
						sev = v4l2_event_subscribed(fh, sub->type, sub->id);
 | 
				
			||||||
| 
						 | 
					@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 | 
				
			||||||
	if (sev && sev->ops && sev->ops->del)
 | 
						if (sev && sev->ops && sev->ops->del)
 | 
				
			||||||
		sev->ops->del(sev);
 | 
							sev->ops->del(sev);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						mutex_unlock(&fh->subscribe_lock);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	kvfree(sev);
 | 
						kvfree(sev);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
 | 
				
			||||||
	INIT_LIST_HEAD(&fh->available);
 | 
						INIT_LIST_HEAD(&fh->available);
 | 
				
			||||||
	INIT_LIST_HEAD(&fh->subscribed);
 | 
						INIT_LIST_HEAD(&fh->subscribed);
 | 
				
			||||||
	fh->sequence = -1;
 | 
						fh->sequence = -1;
 | 
				
			||||||
 | 
						mutex_init(&fh->subscribe_lock);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
EXPORT_SYMBOL_GPL(v4l2_fh_init);
 | 
					EXPORT_SYMBOL_GPL(v4l2_fh_init);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
 | 
				
			||||||
		return;
 | 
							return;
 | 
				
			||||||
	v4l_disable_media_source(fh->vdev);
 | 
						v4l_disable_media_source(fh->vdev);
 | 
				
			||||||
	v4l2_event_unsubscribe_all(fh);
 | 
						v4l2_event_unsubscribe_all(fh);
 | 
				
			||||||
 | 
						mutex_destroy(&fh->subscribe_lock);
 | 
				
			||||||
	fh->vdev = NULL;
 | 
						fh->vdev = NULL;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
 | 
					EXPORT_SYMBOL_GPL(v4l2_fh_exit);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
 | 
				
			||||||
 * @prio: priority of the file handler, as defined by &enum v4l2_priority
 | 
					 * @prio: priority of the file handler, as defined by &enum v4l2_priority
 | 
				
			||||||
 *
 | 
					 *
 | 
				
			||||||
 * @wait: event's wait queue
 | 
					 * @wait: event's wait queue
 | 
				
			||||||
 | 
					 * @subscribe_lock: serialise changes to the subscribed list; guarantee that
 | 
				
			||||||
 | 
					 *		    the add and del event callbacks are orderly called
 | 
				
			||||||
 * @subscribed: list of subscribed events
 | 
					 * @subscribed: list of subscribed events
 | 
				
			||||||
 * @available: list of events waiting to be dequeued
 | 
					 * @available: list of events waiting to be dequeued
 | 
				
			||||||
 * @navailable: number of available events at @available list
 | 
					 * @navailable: number of available events at @available list
 | 
				
			||||||
 * @sequence: event sequence number
 | 
					 * @sequence: event sequence number
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
 | 
					 * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
struct v4l2_fh {
 | 
					struct v4l2_fh {
 | 
				
			||||||
| 
						 | 
					@ -52,6 +55,7 @@ struct v4l2_fh {
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Events */
 | 
						/* Events */
 | 
				
			||||||
	wait_queue_head_t	wait;
 | 
						wait_queue_head_t	wait;
 | 
				
			||||||
 | 
						struct mutex		subscribe_lock;
 | 
				
			||||||
	struct list_head	subscribed;
 | 
						struct list_head	subscribed;
 | 
				
			||||||
	struct list_head	available;
 | 
						struct list_head	available;
 | 
				
			||||||
	unsigned int		navailable;
 | 
						unsigned int		navailable;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue