net/tls: Use cipher sizes structs

Use the newly introduced cipher sizes structs instead of the repeated
switch cases churn.

Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

parent 2d2c5ea242
commit ea7a9d88ba

2 changed files with 76 additions and 51 deletions
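For context: the parent commit (2d2c5ea242) introduces struct tls_cipher_size_desc and the tls_cipher_size_desc[] lookup table that this patch indexes by cipher type. A rough sketch of its shape, inferred from the field accesses in the diff below (the parent commit holds the authoritative definition):

/* Sketch of the descriptor from the parent commit; field names match
 * the cipher_sz-> accesses in the diff below. */
struct tls_cipher_size_desc {
	unsigned int iv;
	unsigned int key;
	unsigned int salt;
	unsigned int tag;
	unsigned int rec_seq;
};

#define CIPHER_SIZE_DESC(cipher) [cipher] = { \
	.iv = cipher ## _IV_SIZE,              \
	.key = cipher ## _KEY_SIZE,            \
	.salt = cipher ## _SALT_SIZE,          \
	.tag = cipher ## _TAG_SIZE,            \
	.rec_seq = cipher ## _REC_SEQ_SIZE,    \
}

const struct tls_cipher_size_desc tls_cipher_size_desc[] = {
	CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_128),
	CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_256),
};

The token-pasting works because the UAPI size macros follow the TLS_CIPHER_<name>_<field>_SIZE naming pattern, so indexing by the TLS_CIPHER_* constant replaces each per-cipher switch with a single array lookup.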
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -902,17 +902,27 @@ static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
 }
 
 static int
-tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx)
+tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 {
+	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
+	const struct tls_cipher_size_desc *cipher_sz;
 	int err, offset, copy, data_len, pos;
 	struct sk_buff *skb, *skb_iter;
 	struct scatterlist sg[1];
 	struct strp_msg *rxm;
 	char *orig_buf, *buf;
 
+	switch (tls_ctx->crypto_recv.info.cipher_type) {
+	case TLS_CIPHER_AES_GCM_128:
+		break;
+	default:
+		return -EINVAL;
+	}
+	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
+
 	rxm = strp_msg(tls_strp_msg(sw_ctx));
-	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
-			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
+	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
+			   sk->sk_allocation);
 	if (!orig_buf)
 		return -ENOMEM;
 	buf = orig_buf;
@@ -927,10 +937,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx)
 
 	sg_init_table(sg, 1);
 	sg_set_buf(&sg[0], buf,
-		   rxm->full_len + TLS_HEADER_SIZE +
-		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
-	err = skb_copy_bits(skb, offset, buf,
-			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+		   rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
+	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
 	if (err)
 		goto free_buf;
 
@@ -941,7 +949,7 @@ tls_device_reencrypt(struct sock *sk, struct tls_sw_context_rx *sw_ctx)
 	else
 		err = 0;
 
-	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+	data_len = rxm->full_len - cipher_sz->tag;
 
 	if (skb_pagelen(skb) > offset) {
 		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
@@ -1024,7 +1032,7 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
 		 * likely have initial fragments decrypted, and final ones not
 		 * decrypted. We need to reencrypt that single SKB.
 		 */
-		return tls_device_reencrypt(sk, sw_ctx);
+		return tls_device_reencrypt(sk, tls_ctx);
 	}
 
 	/* Return immediately if the record is either entirely plaintext or
@@ -1041,7 +1049,7 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
 	}
 
 	ctx->resync_nh_reset = 1;
-	return tls_device_reencrypt(sk, sw_ctx);
+	return tls_device_reencrypt(sk, tls_ctx);
 }
 
 static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
@@ -1062,9 +1070,9 @@ static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
 
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
-	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_prot_info *prot = &tls_ctx->prot_info;
+	const struct tls_cipher_size_desc *cipher_sz;
 	struct tls_record_info *start_marker_record;
 	struct tls_offload_context_tx *offload_ctx;
 	struct tls_crypto_info *crypto_info;
@@ -1099,12 +1107,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 
 	switch (crypto_info->cipher_type) {
 	case TLS_CIPHER_AES_GCM_128:
-		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
-		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
-		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
 		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
-		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
-		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
 		rec_seq =
 		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
 		break;
@@ -1112,31 +1115,31 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 		rc = -EINVAL;
 		goto release_netdev;
 	}
+	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
 
 	/* Sanity-check the rec_seq_size for stack allocations */
-	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
+	if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
 		rc = -EINVAL;
 		goto release_netdev;
 	}
 
 	prot->version = crypto_info->version;
 	prot->cipher_type = crypto_info->cipher_type;
-	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
-	prot->tag_size = tag_size;
+	prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
+	prot->tag_size = cipher_sz->tag;
 	prot->overhead_size = prot->prepend_size + prot->tag_size;
-	prot->iv_size = iv_size;
-	prot->salt_size = salt_size;
-	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
-			     GFP_KERNEL);
+	prot->iv_size = cipher_sz->iv;
+	prot->salt_size = cipher_sz->salt;
+	ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
 	if (!ctx->tx.iv) {
 		rc = -ENOMEM;
 		goto release_netdev;
 	}
 
-	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
+	memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
 
-	prot->rec_seq_size = rec_seq_size;
-	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
+	prot->rec_seq_size = cipher_sz->rec_seq;
+	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
 	if (!ctx->tx.rec_seq) {
 		rc = -ENOMEM;
 		goto free_iv;
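The payoff is visible in tls_set_device_offload() above: all size bookkeeping now comes from the table, and the switch only extracts cipher-specific pointers. A hypothetical sketch (not part of this commit) of what enabling a second cipher would then look like — just the pointer plumbing plus a table entry:

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	case TLS_CIPHER_AES_GCM_256:	/* hypothetical addition */
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto release_netdev;
	}
	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];

All the prot->prepend_size/tag_size/iv_size/salt_size/rec_seq_size assignments that follow stay untouched, since they read cipher_sz.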
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -54,13 +54,24 @@ static int tls_enc_record(struct aead_request *aead_req,
 			  struct scatter_walk *out, int *in_len,
 			  struct tls_prot_info *prot)
 {
-	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
+	unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
+	const struct tls_cipher_size_desc *cipher_sz;
 	struct scatterlist sg_in[3];
 	struct scatterlist sg_out[3];
+	unsigned int buf_size;
 	u16 len;
 	int rc;
 
-	len = min_t(int, *in_len, ARRAY_SIZE(buf));
+	switch (prot->cipher_type) {
+	case TLS_CIPHER_AES_GCM_128:
+		break;
+	default:
+		return -EINVAL;
+	}
+	cipher_sz = &tls_cipher_size_desc[prot->cipher_type];
+
+	buf_size = TLS_HEADER_SIZE + cipher_sz->iv;
+	len = min_t(int, *in_len, buf_size);
 
 	scatterwalk_copychunks(buf, in, len, 0);
 	scatterwalk_copychunks(buf, out, len, 1);
@@ -73,13 +84,11 @@ static int tls_enc_record(struct aead_request *aead_req,
 	scatterwalk_pagedone(out, 1, 1);
 
 	len = buf[4] | (buf[3] << 8);
-	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
+	len -= cipher_sz->iv;
 
-	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
-		(char *)&rcd_sn, buf[0], prot);
+	tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot);
 
-	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
-	       TLS_CIPHER_AES_GCM_128_IV_SIZE);
+	memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv);
 
 	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
 	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
@@ -90,7 +99,7 @@ static int tls_enc_record(struct aead_request *aead_req,
 
 	*in_len -= len;
 	if (*in_len < 0) {
-		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+		*in_len += cipher_sz->tag;
 		/* the input buffer doesn't contain the entire record.
 		 * trim len accordingly. The resulting authentication tag
 		 * will contain garbage, but we don't care, so we won't
@@ -111,7 +120,7 @@ static int tls_enc_record(struct aead_request *aead_req,
 		scatterwalk_pagedone(out, 1, 1);
 	}
 
-	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+	len -= cipher_sz->tag;
 	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
 
 	rc = crypto_aead_encrypt(aead_req);
@@ -299,11 +308,14 @@ static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
 			int sync_size,
 			void *dummy_buf)
 {
+	const struct tls_cipher_size_desc *cipher_sz =
+		&tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
+
 	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
 	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
 	/* Add room for authentication tag produced by crypto */
 	dummy_buf += sync_size;
-	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag);
 }
 
 static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
@@ -315,7 +327,8 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 	int tcp_payload_offset = skb_tcp_all_headers(skb);
 	int payload_len = skb->len - tcp_payload_offset;
-	void *buf, *iv, *aad, *dummy_buf;
+	const struct tls_cipher_size_desc *cipher_sz;
+	void *buf, *iv, *aad, *dummy_buf, *salt;
 	struct aead_request *aead_req;
 	struct sk_buff *nskb = NULL;
 	int buf_len;
@@ -324,20 +337,23 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
 	if (!aead_req)
 		return NULL;
 
-	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
-		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
-		  TLS_AAD_SPACE_SIZE +
-		  sync_size +
-		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+	switch (tls_ctx->crypto_send.info.cipher_type) {
+	case TLS_CIPHER_AES_GCM_128:
+		salt = tls_ctx->crypto_send.aes_gcm_128.salt;
+		break;
+	default:
+		return NULL;
+	}
+	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
+	buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
+		  sync_size + cipher_sz->tag;
 	buf = kmalloc(buf_len, GFP_ATOMIC);
 	if (!buf)
 		goto free_req;
 
 	iv = buf;
-	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
-	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
-	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
-	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
+	memcpy(iv, salt, cipher_sz->salt);
+	aad = buf + cipher_sz->salt + cipher_sz->iv;
 	dummy_buf = aad + TLS_AAD_SPACE_SIZE;
 
 	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
@@ -451,6 +467,7 @@ int tls_sw_fallback_init(struct sock *sk,
 			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info)
 {
+	const struct tls_cipher_size_desc *cipher_sz;
 	const u8 *key;
 	int rc;
 
@@ -463,15 +480,20 @@ int tls_sw_fallback_init(struct sock *sk,
 		goto err_out;
 	}
 
-	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
+	switch (crypto_info->cipher_type) {
+	case TLS_CIPHER_AES_GCM_128:
+		key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
+		break;
+	default:
+		return -EINVAL;
+	}
+	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
 
-	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
-				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+	rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key);
 	if (rc)
 		goto free_aead;
 
-	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
-				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag);
 	if (rc)
 		goto free_aead;
 
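As a standalone illustration of the arithmetic the table centralizes, the userspace sketch below recomputes prot->prepend_size and prot->overhead_size for AES-GCM-128. The constants mirror the kernel's UAPI values; the struct is a local copy for the demo, not the kernel's definition:

#include <stdio.h>

/* Local copies for illustration; values match include/uapi/linux/tls.h
 * for TLS 1.2 AES-GCM-128, and TLS_HEADER_SIZE from include/net/tls.h. */
#define TLS_HEADER_SIZE                      5
#define TLS_CIPHER_AES_GCM_128_IV_SIZE       8
#define TLS_CIPHER_AES_GCM_128_KEY_SIZE     16
#define TLS_CIPHER_AES_GCM_128_SALT_SIZE     4
#define TLS_CIPHER_AES_GCM_128_TAG_SIZE     16
#define TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE  8

struct tls_cipher_size_desc {
	unsigned int iv, key, salt, tag, rec_seq;
};

static const struct tls_cipher_size_desc aes_gcm_128_sz = {
	.iv      = TLS_CIPHER_AES_GCM_128_IV_SIZE,
	.key     = TLS_CIPHER_AES_GCM_128_KEY_SIZE,
	.salt    = TLS_CIPHER_AES_GCM_128_SALT_SIZE,
	.tag     = TLS_CIPHER_AES_GCM_128_TAG_SIZE,
	.rec_seq = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE,
};

int main(void)
{
	const struct tls_cipher_size_desc *sz = &aes_gcm_128_sz;
	/* Mirrors the prot->prepend_size / prot->overhead_size math in
	 * tls_set_device_offload() above. */
	unsigned int prepend  = TLS_HEADER_SIZE + sz->iv; /* 5 + 8  = 13 */
	unsigned int overhead = prepend + sz->tag;        /* 13 + 16 = 29 */

	printf("prepend=%u overhead=%u\n", prepend, overhead);
	return 0;
}

Running it prints prepend=13 overhead=29: the 13-byte header-plus-explicit-nonce prefix and the 16-byte authentication tag that every offloaded TLS 1.2 AES-GCM-128 record carries.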