vmbus: split ring buffer allocation from open

The UIO driver needs the ring buffer to be persistent (reused) across
open/close. Split the allocation and setup of the ring buffer out of
vmbus_open. For normal vmbus_open/vmbus_close usage there are no
changes; this only impacts uio_hv_generic, which needs to keep the
ring buffer memory and reuse it when the application restarts.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 52a42c2a90
commit ae6935ed7d

3 changed files with 169 additions and 122 deletions
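For context, a minimal sketch of how a consumer is meant to use the split
API. This is illustrative only and not part of this commit: the my_* hook
names and MY_RING_SIZE constant are hypothetical stand-ins for what a
driver such as uio_hv_generic might do once it keeps the ring buffer
alive across open/close.

/*
 * Illustrative sketch (not in this commit): allocate the ring once at
 * probe time, reconnect on every open, and free only on remove, so the
 * memory handed to userspace survives application restarts.
 * MY_RING_SIZE and the hook names are hypothetical.
 */
#include <linux/hyperv.h>

#define MY_RING_SIZE	(512 * PAGE_SIZE)	/* assumed ring size */

static void my_channel_cb(void *context)
{
	/* drain/process the inbound ring for the channel in @context */
}

static int my_probe(struct hv_device *dev)
{
	/* allocate and map ring pages once; they persist until remove() */
	return vmbus_alloc_ring(dev->channel, MY_RING_SIZE, MY_RING_SIZE);
}

static int my_open(struct hv_device *dev)
{
	/* open the channel, reusing the already-allocated ring buffer */
	return vmbus_connect_ring(dev->channel, my_channel_cb, dev->channel);
}

static int my_release(struct hv_device *dev)
{
	/* close the channel but keep the ring memory for the next open */
	return vmbus_disconnect_ring(dev->channel);
}

static void my_remove(struct hv_device *dev)
{
	/* only now give the ring pages back */
	vmbus_free_ring(dev->channel);
}

This pattern was impossible with the old API: vmbus_open() allocated the
pages and vmbus_close() freed them, as the diff below shows.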
				
			
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -79,84 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
 }
 EXPORT_SYMBOL_GPL(vmbus_setevent);
 
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
-		     u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
-		     void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
 {
-	struct vmbus_channel_open_channel *open_msg;
-	struct vmbus_channel_msginfo *open_info = NULL;
-	unsigned long flags;
-	int ret, err = 0;
-	struct page *page;
-	unsigned int order;
+	hv_ringbuffer_cleanup(&channel->outbound);
+	hv_ringbuffer_cleanup(&channel->inbound);
 
-	if (send_ringbuffer_size % PAGE_SIZE ||
-	    recv_ringbuffer_size % PAGE_SIZE)
-		return -EINVAL;
+	if (channel->ringbuffer_page) {
+		__free_pages(channel->ringbuffer_page,
+			     get_order(channel->ringbuffer_pagecount
+				       << PAGE_SHIFT));
+		channel->ringbuffer_page = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
 
-	order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+		     u32 send_size, u32 recv_size)
+{
+	struct page *page;
+	int order;
 
-	spin_lock_irqsave(&newchannel->lock, flags);
-	if (newchannel->state == CHANNEL_OPEN_STATE) {
-		newchannel->state = CHANNEL_OPENING_STATE;
-	} else {
-		spin_unlock_irqrestore(&newchannel->lock, flags);
-		return -EINVAL;
-	}
-	spin_unlock_irqrestore(&newchannel->lock, flags);
-
-	newchannel->onchannel_callback = onchannelcallback;
-	newchannel->channel_callback_context = context;
+	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
+		return -EINVAL;
 
 	/* Allocate the ring buffer */
+	order = get_order(send_size + recv_size);
 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
 				GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page)
 		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
-	if (!page) {
-		err = -ENOMEM;
-		goto error_set_chnstate;
-	}
+	if (!page)
+		return -ENOMEM;
 
 	newchannel->ringbuffer_page = page;
-	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
-					   recv_ringbuffer_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
 
-	ret = hv_ringbuffer_init(&newchannel->outbound, page,
-				 send_ringbuffer_size >> PAGE_SHIFT);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+static int __vmbus_open(struct vmbus_channel *newchannel,
+		       void *userdata, u32 userdatalen,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	struct vmbus_channel_open_channel *open_msg;
+	struct vmbus_channel_msginfo *open_info = NULL;
+	struct page *page = newchannel->ringbuffer_page;
+	u32 send_pages, recv_pages;
+	unsigned long flags;
+	int err;
 
-	ret = hv_ringbuffer_init(&newchannel->inbound,
-				 &page[send_ringbuffer_size >> PAGE_SHIFT],
-				 recv_ringbuffer_size >> PAGE_SHIFT);
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+	if (userdatalen > MAX_USER_DEFINED_BYTES)
+		return -EINVAL;
+
+	send_pages = newchannel->ringbuffer_send_offset;
+	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+	spin_lock_irqsave(&newchannel->lock, flags);
+	if (newchannel->state != CHANNEL_OPEN_STATE) {
+		spin_unlock_irqrestore(&newchannel->lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&newchannel->lock, flags);
 
+	newchannel->state = CHANNEL_OPENING_STATE;
+	newchannel->onchannel_callback = onchannelcallback;
+	newchannel->channel_callback_context = context;
+
+	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+	if (err)
+		goto error_clean_ring;
+
+	err = hv_ringbuffer_init(&newchannel->inbound,
+				 &page[send_pages], recv_pages);
+	if (err)
+		goto error_clean_ring;
 
 	/* Establish the gpadl for the ring buffer */
 	newchannel->ringbuffer_gpadlhandle = 0;
 
-	ret = vmbus_establish_gpadl(newchannel,
-				    page_address(page),
-				    send_ringbuffer_size +
-				    recv_ringbuffer_size,
+	err = vmbus_establish_gpadl(newchannel,
+				    page_address(newchannel->ringbuffer_page),
+				    (send_pages + recv_pages) << PAGE_SHIFT,
 				    &newchannel->ringbuffer_gpadlhandle);
-
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+	if (err)
+		goto error_clean_ring;
 
 	/* Create and init the channel open message */
 	open_info = kmalloc(sizeof(*open_info) +
@@ -175,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	open_msg->openid = newchannel->offermsg.child_relid;
 	open_msg->child_relid = newchannel->offermsg.child_relid;
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
-						  PAGE_SHIFT;
+	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
 	open_msg->target_vp = newchannel->target_vp;
 
-	if (userdatalen > MAX_USER_DEFINED_BYTES) {
-		err = -EINVAL;
-		goto error_free_gpadl;
-	}
-
 	if (userdatalen)
 		memcpy(open_msg->userdata, userdata, userdatalen);
 
@@ -194,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
-	ret = vmbus_post_msg(open_msg,
+	err = vmbus_post_msg(open_msg,
 			     sizeof(struct vmbus_channel_open_channel), true);
 
-	trace_vmbus_open(open_msg, ret);
+	trace_vmbus_open(open_msg, err);
 
-	if (ret != 0) {
-		err = ret;
+	if (err != 0)
 		goto error_clean_msglist;
-	}
 
 	wait_for_completion(&open_info->waitevent);
 
@@ -215,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	if (open_info->response.open_result.status) {
 		err = -EAGAIN;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	newchannel->state = CHANNEL_OPENED_STATE;
@@ -231,18 +235,50 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&open_info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
+error_free_info:
+	kfree(open_info);
 error_free_gpadl:
 	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-	kfree(open_info);
-error_free_pages:
+	newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page, order);
-error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
 }
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	return  __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+	       void *userdata, u32 userdatalen,
+	       void (*onchannelcallback)(void *context), void *context)
+{
+	int err;
+
+	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+			       recv_ringbuffer_size);
+	if (err)
+		return err;
+
+	err = __vmbus_open(newchannel, userdata, userdatalen,
+			   onchannelcallback, context);
+	if (err)
+		vmbus_free_ring(newchannel);
+
+	return err;
+}
 EXPORT_SYMBOL_GPL(vmbus_open);
 
 /* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -610,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 	 * here we should skip most of the below cleanup work.
 	 */
-	if (channel->state != CHANNEL_OPENED_STATE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (channel->state != CHANNEL_OPENED_STATE)
+		return -EINVAL;
 
 	channel->state = CHANNEL_OPEN_STATE;
 
@@ -635,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 		 * If we failed to post the close msg,
 		 * it is perhaps better to leak memory.
 		 */
-		goto out;
 	}
 
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->ringbuffer_gpadlhandle) {
+	else if (channel->ringbuffer_gpadlhandle) {
 		ret = vmbus_teardown_gpadl(channel,
 					   channel->ringbuffer_gpadlhandle);
 		if (ret) {
@@ -648,59 +681,63 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 			 * If we failed to teardown gpadl,
 			 * it is perhaps better to leak memory.
 			 */
-			goto out;
 		}
+
+		channel->ringbuffer_gpadlhandle = 0;
 	}
 
-	/* Cleanup the ring buffers for this channel */
-	hv_ringbuffer_cleanup(&channel->outbound);
-	hv_ringbuffer_cleanup(&channel->inbound);
-
-	__free_pages(channel->ringbuffer_page,
-		     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
-
-out:
 	return ret;
 }
 
+/* disconnect ring - close all channels */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
+{
+	struct vmbus_channel *cur_channel, *tmp;
+	unsigned long flags;
+	LIST_HEAD(list);
+	int ret;
+
+	if (channel->primary_channel != NULL)
+		return -EINVAL;
+
+	/* Snapshot the list of subchannels */
+	spin_lock_irqsave(&channel->lock, flags);
+	list_splice_init(&channel->sc_list, &list);
+	channel->num_sc = 0;
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+		if (cur_channel->rescind)
+			wait_for_completion(&cur_channel->rescind_event);
+
+		mutex_lock(&vmbus_connection.channel_mutex);
+		if (vmbus_close_internal(cur_channel) == 0) {
+			vmbus_free_ring(cur_channel);
+
+			if (cur_channel->rescind)
+				hv_process_channel_removal(cur_channel);
+		}
+		mutex_unlock(&vmbus_connection.channel_mutex);
+	}
+
+	/*
+	 * Now close the primary.
+	 */
+	mutex_lock(&vmbus_connection.channel_mutex);
+	ret = vmbus_close_internal(channel);
+	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
 /*
  * vmbus_close - Close the specified channel
  */
 void vmbus_close(struct vmbus_channel *channel)
 {
-	struct list_head *cur, *tmp;
-	struct vmbus_channel *cur_channel;
-
-	if (channel->primary_channel != NULL) {
-		/*
-		 * We will only close sub-channels when
-		 * the primary is closed.
-		 */
-		return;
-	}
-	/*
-	 * Close all the sub-channels first and then close the
-	 * primary channel.
-	 */
-	list_for_each_safe(cur, tmp, &channel->sc_list) {
-		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-		if (cur_channel->rescind) {
-			wait_for_completion(&cur_channel->rescind_event);
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
-			hv_process_channel_removal(cur_channel);
-		} else {
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
-		}
-		mutex_unlock(&vmbus_connection.channel_mutex);
-	}
-	/*
-	 * Now close the primary.
-	 */
-	mutex_lock(&vmbus_connection.channel_mutex);
-	vmbus_close_internal(channel);
-	mutex_unlock(&vmbus_connection.channel_mutex);
+	if (vmbus_disconnect_ring(channel) == 0)
+		vmbus_free_ring(channel);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
 	vunmap(ring_info->ring_buffer);
+	ring_info->ring_buffer = NULL;
 }
 
 /* Write to the ring buffer. */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -741,6 +741,7 @@ struct vmbus_channel {
 	/* Allocated memory for ring buffer */
 	struct page *ringbuffer_page;
 	u32 ringbuffer_pagecount;
+	u32 ringbuffer_send_offset;
 	struct hv_ring_buffer_info outbound;	/* send to parent */
 	struct hv_ring_buffer_info inbound;	/* receive from parent */
 
@@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array {
 	struct hv_mpb_array range;
 } __packed;
 
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+		     u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+		       void (*onchannel_callback)(void *context),
+		       void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
 
 extern int vmbus_open(struct vmbus_channel *channel,
 			    u32 send_ringbuffersize,