	crypto: ccree - protect against empty or NULL scatterlists
Deal gracefully with a NULL or empty scatterlist which can happen if both
cryptlen and assoclen are zero and we're doing in-place AEAD encryption.

This fixes a crash when this causes us to try and map a NULL page, at
least with some platforms / DMA mapping configs.

Cc: stable@vger.kernel.org # v4.19+
Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 07b586fe06
commit ce0fc6db38

2 changed files with 28 additions and 35 deletions
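
For context on the failure mode (illustrative, not part of the commit): with
cryptlen == 0 and assoclen == 0 on an in-place AEAD request there may be
nothing to map at all. Below is a minimal userspace C model of the fixed
cc_get_sgl_nents() walk; struct sg and sg_next() here are stand-ins for the
kernel's scatterlist API, so the names and layout are assumptions for
illustration only.

#include <stdio.h>

struct sg {
	unsigned int length;
	struct sg *next;
};

static struct sg *sg_next(struct sg *s)
{
	return s->next;
}

static unsigned int get_sgl_nents(struct sg *sg_list, unsigned int nbytes,
				  unsigned int *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;	/* the fix: defined even if the loop never runs */

	while (nbytes && sg_list) {	/* NULL/empty list yields 0 nents */
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}
	return nents;
}

int main(void)
{
	unsigned int lbytes, nents;

	/* cryptlen == 0 && assoclen == 0: nothing to walk, no NULL deref */
	nents = get_sgl_nents(NULL, 0, &lbytes);
	printf("nents=%u lbytes=%u\n", nents, lbytes);
	return 0;
}

With a NULL list the loop never runs, so the caller sees zero nents and a
defined *lbytes instead of a dereference of a non-existent entry.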
drivers/crypto/ccree/cc_buffer_mgr.c

@@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 {
 	unsigned int nents = 0;
 
+	*lbytes = 0;
+
 	while (nbytes && sg_list) {
 		nents++;
 		/* get the number of bytes in the last entry */
@@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 			nbytes : sg_list->length;
 		sg_list = sg_next(sg_list);
 	}
+
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
 }
@@ -290,19 +293,8 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	if (sg_is_last(sg)) {
-		/* One entry only case -set to DLLI */
-		if (dma_map_sg(dev, sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_sg() single buffer failed\n");
-			return -ENOMEM;
-		}
-		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-			sg->offset, sg->length);
-		*lbytes = nbytes;
-		*nents = 1;
-		*mapped_nents = 1;
-	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-		if (*nents > max_sg_nents) {
-			*nents = 0;
+	int ret = 0;
+
+	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+	if (*nents > max_sg_nents) {
+		*nents = 0;
@@ -310,16 +302,15 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
-				*nents, max_sg_nents);
-			return -ENOMEM;
-		}
-		/* In case of mmu the number of mapped nents might
-		 * be changed from the original sgl nents
-		 */
-		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-		if (*mapped_nents == 0) {
-			*nents = 0;
-			dev_err(dev, "dma_map_sg() sg buffer failed\n");
-			return -ENOMEM;
-		}
-	}
+			*nents, max_sg_nents);
+		return -ENOMEM;
+	}
+
+	ret = dma_map_sg(dev, sg, *nents, direction);
+	if (dma_mapping_error(dev, ret)) {
+		*nents = 0;
+		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+		return -ENOMEM;
+	}
+
+	*mapped_nents = ret;
 
 	return 0;
 }
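A note on the rewrite above (a sketch under stated assumptions, not the
driver code): cc_map_sg() now always walks the list and keeps two counts,
because dma_map_sg() may coalesce adjacent entries and so can return fewer
entries than it was given, with 0 signalling failure. The helper name
map_for_hw() and the hw_nents parameter below are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper, not driver code: keep both counts around. */
static int map_for_hw(struct device *dev, struct scatterlist *sgl,
		      unsigned int sgl_nents, unsigned int *hw_nents)
{
	int mapped = dma_map_sg(dev, sgl, sgl_nents, DMA_BIDIRECTIONAL);

	if (mapped == 0)	/* dma_map_sg() returns 0 on failure */
		return -ENOMEM;

	*hw_nents = mapped;	/* entries the DMA engine will actually see */
	return 0;		/* sgl_nents must be replayed at unmap time */
}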
@@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
 
-	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
+		     DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
 			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
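The unmap hunk follows from the DMA API rule that dma_unmap_sg() must be
passed the same nents value that was given to dma_map_sg(); sg_nents()
re-walks the whole chain and can disagree with what was actually mapped.
A minimal sketch of the pairing, where struct sgl_mapping is a hypothetical
stand-in for the driver's cc_mlli bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical bookkeeping, mirroring what the new mapped_nents field
 * stores: the nents value handed to dma_map_sg(), kept so the unmap
 * uses exactly the same count.
 */
struct sgl_mapping {
	struct scatterlist *sgl;
	unsigned int mapped_nents;
};

static void unmap_sgl(struct device *dev, struct sgl_mapping *m)
{
	dma_unmap_sg(dev, m->sgl, m->mapped_nents, DMA_BIDIRECTIONAL);
}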
@@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
+	while (src_mapped_nents && (sg_index <= size_to_skip)) {
 		src_mapped_nents--;
 		offset -= areq_ctx->src_sgl->length;
 		sgl = sg_next(areq_ctx->src_sgl);
@@ -908,7 +900,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 			size_for_map += crypto_aead_ivsize(tfm);
 
 		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-			       &areq_ctx->dst.nents,
+			       &areq_ctx->dst.mapped_nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 			       &dst_mapped_nents);
 		if (rc)
@@ -921,7 +913,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	offset = size_to_skip;
 
 	//check where the data starts
-	while (sg_index <= size_to_skip) {
+	while (dst_mapped_nents && sg_index <= size_to_skip) {
 		dst_mapped_nents--;
 		offset -= areq_ctx->dst_sgl->length;
 		sgl = sg_next(areq_ctx->dst_sgl);
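Both skip loops above rely on the same guard: when the skipped region covers
the whole (possibly empty) list, sg_index <= size_to_skip never becomes false
on its own, so the remaining entry count has to bound the walk. A minimal
userspace model follows, with struct ent standing in for the scatterlist
(the names are assumptions, not driver code):

#include <stdio.h>

struct ent {
	unsigned int length;
	struct ent *next;
};

/*
 * Model of the guarded skip: mapped_nents comes from walking the same
 * list, so it is 0 whenever the list is empty, and the loop body can
 * never chase a non-existent next entry.
 */
static struct ent *skip_entries(struct ent *e, unsigned int mapped_nents,
				unsigned int size_to_skip)
{
	unsigned int sg_index = e ? e->length : 0;

	while (mapped_nents && sg_index <= size_to_skip) {
		mapped_nents--;
		e = e->next;
		if (e)
			sg_index += e->length;
	}
	return e;
}

int main(void)
{
	/* zero-length AEAD payload: nothing mapped, nothing to skip into */
	printf("%s\n", skip_entries(NULL, 0, 0) ? "entry" : "none");
	return 0;
}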
@@ -1123,7 +1115,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 	if (is_gcm4543)
 		size_to_map += crypto_aead_ivsize(tfm);
 	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-		       &areq_ctx->src.nents,
+		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
drivers/crypto/ccree/cc_buffer_mgr.h

@@ -25,6 +25,7 @@ enum cc_sg_cpy_direct {
 
 struct cc_mlli {
 	cc_sram_addr_t sram_addr;
+	unsigned int mapped_nents;
 	unsigned int nents; //sg nents
 	unsigned int mlli_nents; //mlli nents might be different than the above
 };