	sctp: get netns from asoc and ep base
Commit 312434617c ("sctp: cache netns in sctp_ep_common") caches the
netns in the asoc and ep base when they are created, and it never
changes afterwards. Getting the netns from the asoc and ep base is
therefore better than calling sock_net() on their socket. This patch
replaces those sock_net() calls accordingly.
v1->v2:
  - no change.
Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
parent 26c97a2d82
commit 4e7696d90b

14 changed files with 49 additions and 62 deletions
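As background for the diff below, here is a minimal, self-contained sketch of the pattern the patch applies. The struct layouts and the sock_net() helper are simplified stand-ins, not the kernel's real definitions; the point is only that sctp_ep_common caches a netns pointer when the ep/asoc is created, so code holding an asoc or ep can read base.net directly instead of dereferencing the socket.

/* Minimal sketch with simplified stand-in types (not the real kernel
 * structures): sctp_ep_common carries a cached netns pointer set once
 * at creation, so holders of an asoc or ep can read base.net directly
 * instead of going through the socket via sock_net().
 */
#include <assert.h>

struct net { int id; };                  /* stand-in for the netns object */
struct sock { struct net *sk_net; };     /* stand-in; real struct sock differs */

static struct net *sock_net(const struct sock *sk)
{
	return sk->sk_net;               /* what the old call paths resolved */
}

struct sctp_ep_common {
	struct sock *sk;                 /* owning socket */
	struct net *net;                 /* cached at ep/asoc init, never changes */
};

struct sctp_association {
	struct sctp_ep_common base;
};

int main(void)
{
	struct net init_net = { .id = 1 };
	struct sock sk = { .sk_net = &init_net };
	struct sctp_association asoc = {
		.base = { .sk = &sk, .net = &init_net },
	};

	/* Old style: go through the socket. */
	struct net *via_sock = sock_net(asoc.base.sk);
	/* New style used throughout the diff: read the cached pointer. */
	struct net *via_base = asoc.base.net;

	assert(via_sock == via_base);    /* same namespace either way */
	return 0;
}

Either path yields the same struct net; reading the cached pointer just skips the socket dereference, which is what every hunk below does.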
				
			
@@ -584,7 +584,6 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 					   const gfp_t gfp,
 					   const int peer_state)
 {
-	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_transport *peer;
 	struct sctp_sock *sp;
 	unsigned short port;
@@ -614,7 +613,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 		return peer;
 	}
 
-	peer = sctp_transport_new(net, addr, gfp);
+	peer = sctp_transport_new(asoc->base.net, addr, gfp);
 	if (!peer)
 		return NULL;
 
@@ -974,7 +973,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
 	struct sctp_association *asoc =
 		container_of(work, struct sctp_association,
 			     base.inqueue.immediate);
-	struct net *net = sock_net(asoc->base.sk);
+	struct net *net = asoc->base.net;
 	union sctp_subtype subtype;
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
@@ -1442,7 +1441,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 /* Should we send a SACK to update our peer? */
 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
 {
-	struct net *net = sock_net(asoc->base.sk);
+	struct net *net = asoc->base.net;
+
 	switch (asoc->state) {
 	case SCTP_STATE_ESTABLISHED:
 	case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1576,7 +1576,7 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
 	if (asoc->peer.ipv6_address)
 		flags |= SCTP_ADDR6_PEERSUPP;
 
-	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+	return sctp_bind_addr_copy(asoc->base.net,
 				   &asoc->base.bind_addr,
 				   &asoc->ep->base.bind_addr,
 				   scope, gfp, flags);

@@ -225,7 +225,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	if (msg_len >= first_len) {
 		msg->can_delay = 0;
 		if (msg_len > first_len)
-			SCTP_INC_STATS(sock_net(asoc->base.sk),
+			SCTP_INC_STATS(asoc->base.net,
 				       SCTP_MIB_FRAGUSRMSGS);
 	} else {
 		/* Which may be the only one... */

@@ -244,7 +244,7 @@ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
 	struct sctp_endpoint *retval = NULL;
 
 	if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
-	    net_eq(sock_net(ep->base.sk), net)) {
+	    net_eq(ep->base.net, net)) {
 		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
 					 sctp_sk(ep->base.sk)))
 			retval = ep;
@@ -292,8 +292,8 @@ bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 				 const union sctp_addr *paddr)
 {
 	struct sctp_sockaddr_entry *addr;
+	struct net *net = ep->base.net;
 	struct sctp_bind_addr *bp;
-	struct net *net = sock_net(ep->base.sk);
 
 	bp = &ep->base.bind_addr;
 	/* This function is called with the socket lock held,
@@ -384,7 +384,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
 		if (asoc && sctp_chunk_is_data(chunk))
 			asoc->peer.last_data_from = chunk->transport;
 		else {
-			SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
+			SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS);
 			if (asoc)
 				asoc->stats.ictrlchunks++;
 		}

@@ -937,7 +937,7 @@ int sctp_hash_transport(struct sctp_transport *t)
 	if (t->asoc->temp)
 		return 0;
 
-	arg.net   = sock_net(t->asoc->base.sk);
+	arg.net   = t->asoc->base.net;
 	arg.paddr = &t->ipaddr;
 	arg.lport = htons(t->asoc->base.bind_addr.port);
 
@@ -1004,12 +1004,11 @@ struct sctp_transport *sctp_epaddr_lookup_transport(
 				const struct sctp_endpoint *ep,
 				const union sctp_addr *paddr)
 {
-	struct net *net = sock_net(ep->base.sk);
 	struct rhlist_head *tmp, *list;
 	struct sctp_transport *t;
 	struct sctp_hash_cmp_arg arg = {
 		.paddr = paddr,
-		.net   = net,
+		.net   = ep->base.net,
 		.lport = htons(ep->base.bind_addr.port),
 	};
 

@@ -282,7 +282,7 @@ static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
 					sctp_chunk_free(sack);
 					goto out;
 				}
-				SCTP_INC_STATS(sock_net(asoc->base.sk),
+				SCTP_INC_STATS(asoc->base.net,
 					       SCTP_MIB_OUTCTRLCHUNKS);
 				asoc->stats.octrlchunks++;
 				asoc->peer.sack_needed = 0;

@@ -279,7 +279,7 @@ void sctp_outq_free(struct sctp_outq *q)
 /* Put a new chunk in an sctp_outq.  */
 void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
 {
-	struct net *net = sock_net(q->asoc->base.sk);
+	struct net *net = q->asoc->base.net;
 
 	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
 		 chunk && chunk->chunk_hdr ?
@@ -533,7 +533,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 		     enum sctp_retransmit_reason reason)
 {
-	struct net *net = sock_net(q->asoc->base.sk);
+	struct net *net = q->asoc->base.net;
 
 	switch (reason) {
 	case SCTP_RTXR_T3_RTX:
@@ -1884,6 +1884,6 @@ void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 
 	if (ftsn_chunk) {
 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
-		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
 	}
 }

@@ -2307,7 +2307,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 		      const union sctp_addr *peer_addr,
 		      struct sctp_init_chunk *peer_init, gfp_t gfp)
 {
-	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_transport *transport;
 	struct list_head *pos, *temp;
 	union sctp_params param;
@@ -2363,7 +2362,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 	 * also give us an option to silently ignore the packet, which
 	 * is what we'll do here.
 	 */
-	if (!net->sctp.addip_noauth &&
+	if (!asoc->base.net->sctp.addip_noauth &&
 	    (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
 		asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
 						  SCTP_PARAM_DEL_IP |
@@ -2491,9 +2490,9 @@ static int sctp_process_param(struct sctp_association *asoc,
 			      const union sctp_addr *peer_addr,
 			      gfp_t gfp)
 {
-	struct net *net = sock_net(asoc->base.sk);
 	struct sctp_endpoint *ep = asoc->ep;
 	union sctp_addr_param *addr_param;
+	struct net *net = asoc->base.net;
 	struct sctp_transport *t;
 	enum sctp_scope scope;
 	union sctp_addr addr;

@@ -516,8 +516,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 					 struct sctp_transport *transport,
 					 int is_hb)
 {
-	struct net *net = sock_net(asoc->base.sk);
-
 	/* The check for association's overall error counter exceeding the
 	 * threshold is done in the state function.
 	 */
@@ -544,10 +542,10 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
 	 * see SCTP Quick Failover Draft, section 5.1
 	 */
-	if (net->sctp.pf_enable &&
-	   (transport->state == SCTP_ACTIVE) &&
-	   (transport->error_count < transport->pathmaxrxt) &&
-	   (transport->error_count > transport->pf_retrans)) {
+	if (asoc->base.net->sctp.pf_enable &&
+	    transport->state == SCTP_ACTIVE &&
+	    transport->error_count < transport->pathmaxrxt &&
+	    transport->error_count > transport->pf_retrans) {
 
 		sctp_assoc_control_transport(asoc, transport,
 					     SCTP_TRANSPORT_PF,
@@ -798,10 +796,8 @@ static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
 	int err = 0;
 
 	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
-		struct net *net = sock_net(asoc->base.sk);
-
 		/* There are no more TSNs awaiting SACK.  */
-		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
+		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
 				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
 				 asoc->state, asoc->ep, asoc, NULL,
 				 GFP_ATOMIC);
@@ -834,7 +830,7 @@ static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
 				  struct sctp_association *asoc,
 				  struct sctp_association *new)
 {
-	struct net *net = sock_net(asoc->base.sk);
+	struct net *net = asoc->base.net;
 	struct sctp_chunk *abort;
 
 	if (!sctp_assoc_update(asoc, new))

@@ -1320,7 +1320,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
 				       struct sctp_chunk *init,
 				       struct sctp_cmd_seq *commands)
 {
-	struct net *net = sock_net(new_asoc->base.sk);
+	struct net *net = new_asoc->base.net;
 	struct sctp_transport *new_addr;
 	int ret = 1;
 

@@ -436,7 +436,6 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 static int sctp_send_asconf(struct sctp_association *asoc,
 			    struct sctp_chunk *chunk)
 {
-	struct net 	*net = sock_net(asoc->base.sk);
 	int retval = 0;
 
 	/* If there is an outstanding ASCONF chunk, queue it for later
@@ -449,7 +448,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
 
 	/* Hold the chunk until an ASCONF_ACK is received. */
 	sctp_chunk_hold(chunk);
-	retval = sctp_primitive_ASCONF(net, asoc, chunk);
+	retval = sctp_primitive_ASCONF(asoc->base.net, asoc, chunk);
 	if (retval)
 		sctp_chunk_free(chunk);
 	else
@@ -2428,9 +2427,8 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 	int error;
 
 	if (params->spp_flags & SPP_HB_DEMAND && trans) {
-		struct net *net = sock_net(trans->asoc->base.sk);
-
-		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
+		error = sctp_primitive_REQUESTHEARTBEAT(trans->asoc->base.net,
+							trans->asoc, trans);
 		if (error)
 			return error;
 	}
@@ -5364,7 +5362,7 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
 		if (!sctp_transport_hold(t))
 			continue;
 
-		if (net_eq(sock_net(t->asoc->base.sk), net) &&
+		if (net_eq(t->asoc->base.net, net) &&
 		    t->asoc->peer.primary_path == t)
 			break;
 

@@ -218,10 +218,9 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
 static int sctp_send_reconf(struct sctp_association *asoc,
 			    struct sctp_chunk *chunk)
 {
-	struct net *net = sock_net(asoc->base.sk);
 	int retval = 0;
 
-	retval = sctp_primitive_RECONF(net, asoc, chunk);
+	retval = sctp_primitive_RECONF(asoc->base.net, asoc, chunk);
 	if (retval)
 		sctp_chunk_free(chunk);
 

@@ -241,9 +241,8 @@ static struct sctp_ulpevent *sctp_intl_retrieve_partial(
 	if (!first_frag)
 		return NULL;
 
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
-					     &ulpq->reasm, first_frag,
-					     last_frag);
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
+					     first_frag, last_frag);
 	if (retval) {
 		sin->fsn = next_fsn;
 		if (is_last) {
@@ -326,7 +325,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
 
 	pd_point = sctp_sk(asoc->base.sk)->pd_point;
 	if (pd_point && pd_point <= pd_len) {
-		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+		retval = sctp_make_reassembled_event(asoc->base.net,
 						     &ulpq->reasm,
 						     pd_first, pd_last);
 		if (retval) {
@@ -337,8 +336,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
 	goto out;
 
 found:
-	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
-					     &ulpq->reasm,
+	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
 					     first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
@@ -630,7 +628,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
 	if (!first_frag)
 		return NULL;
 
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
 					     &ulpq->reasm_uo, first_frag,
 					     last_frag);
 	if (retval) {
@@ -716,7 +714,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
 
 	pd_point = sctp_sk(asoc->base.sk)->pd_point;
 	if (pd_point && pd_point <= pd_len) {
-		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+		retval = sctp_make_reassembled_event(asoc->base.net,
 						     &ulpq->reasm_uo,
 						     pd_first, pd_last);
 		if (retval) {
@@ -727,8 +725,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
 	goto out;
 
 found:
-	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
-					     &ulpq->reasm_uo,
+	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
 					     first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
@@ -814,7 +811,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
 		return NULL;
 
 out:
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
 					     &ulpq->reasm_uo, first_frag,
 					     last_frag);
 	if (retval) {
@@ -921,7 +918,7 @@ static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
 		return NULL;
 
 out:
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
 					     &ulpq->reasm, first_frag,
 					     last_frag);
 	if (retval) {
@@ -1159,7 +1156,7 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
 
 	if (ftsn_chunk) {
 		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
-		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
 	}
 }
 

@@ -334,7 +334,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
 		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);
 
 	if (tp->rttvar || tp->srtt) {
-		struct net *net = sock_net(tp->asoc->base.sk);
+		struct net *net = tp->asoc->base.net;
 		/* 6.3.1 C3) When a new RTT measurement R' is made, set
 		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
 		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'

@@ -486,10 +486,9 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 		cevent = sctp_skb2event(pd_first);
 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 		if (pd_point && pd_point <= pd_len) {
-			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+			retval = sctp_make_reassembled_event(asoc->base.net,
 							     &ulpq->reasm,
-							     pd_first,
-							     pd_last);
+							     pd_first, pd_last);
 			if (retval)
 				sctp_ulpq_set_pd(ulpq);
 		}
@@ -497,7 +496,7 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
 					     &ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
@@ -563,8 +562,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 	 * further.
 	 */
done:
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
-					&ulpq->reasm, first_frag, last_frag);
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
+					     first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
@@ -664,8 +663,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 	 * further.
 	 */
done:
-	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
-					&ulpq->reasm, first_frag, last_frag);
+	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
+					     first_frag, last_frag);
 	return retval;
 }
 