dmaengine: remove DMA_SG as it is dead code in kernel

There are no in kernel consumers for DMA_SG op. Removing operation,
dead code, and test code in dmatest.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Cc: Gary Hook <gary.hook@amd.com>
Cc: Ludovic Desroches <ludovic.desroches@microchip.com>
Cc: Kedareswara rao Appana <appana.durga.rao@xilinx.com>
Cc: Li Yang <leoyang.li@nxp.com>
Cc: Michal Simek <michal.simek@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 61b5f54d8c
commit c678fa6634

12 changed files with 5 additions and 786 deletions
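For context before the diff, this is roughly what a consumer of the removed operation would have looked like. The sketch below is hypothetical — no such in-tree caller existed, which is exactly why the op is being removed — and it assumes both scatterlists were already mapped with dma_map_sg() for the channel's device:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical user of the removed dmaengine_prep_dma_sg() helper.
 * Assumes src_sg/dst_sg are already DMA-mapped against
 * chan->device->dev; all names besides the dmaengine API are made up.
 */
static int example_sg_to_sg_copy(struct dma_chan *chan,
				 struct scatterlist *dst_sg, unsigned int dst_nents,
				 struct scatterlist *src_sg, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Build one transaction covering both scatterlists */
	tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
				   src_sg, src_nents, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Queue it and kick the engine */
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}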
				
			
@@ -181,13 +181,6 @@ Currently, the types available are:
     - Used by the client drivers to register a callback that will be
       called on a regular basis through the DMA controller interrupt
 
-  * DMA_SG
-    - The device supports memory to memory scatter-gather
-      transfers.
-    - Even though a plain memcpy can look like a particular case of a
-      scatter-gather transfer, with a single chunk to transfer, it's a
-      distinct transaction type in the mem2mem transfers case
-
   * DMA_PRIVATE
     - The devices only supports slave transfers, and as such isn't
       available for async transfers.
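With the capability gone, a mem2mem scatter-gather copy has to be emulated by walking both lists and issuing one DMA_MEMCPY descriptor per overlapping chunk. A minimal sketch of that pattern follows — my illustration, not part of the patch, though it mirrors the chunking loop the removed driver implementations used internally; both scatterlists are assumed to be DMA-mapped already:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Illustrative fallback, not from the patch: emulate a mem2mem
 * scatter-gather copy with one DMA_MEMCPY descriptor per chunk.
 * Both scatterlists must already be DMA-mapped for @chan's device.
 */
static int sg_copy_via_memcpy(struct dma_chan *chan,
			      struct scatterlist *dst_sg, unsigned int dst_nents,
			      struct scatterlist *src_sg, unsigned int src_nents,
			      unsigned long flags)
{
	struct dma_async_tx_descriptor *tx;
	size_t dst_avail, src_avail, len;
	dma_addr_t dst, src;

	if (!dst_sg || !src_sg || !dst_nents || !src_nents)
		return -EINVAL;

	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	while (true) {
		/* largest chunk both current entries can satisfy */
		len = min(src_avail, dst_avail);
		if (len) {
			dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
			src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

			tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
			if (!tx)
				return -ENOMEM;
			dmaengine_submit(tx);

			dst_avail -= len;
			src_avail -= len;
		}

		/* advance whichever list ran dry; stop at either end */
		if (!dst_avail) {
			if (!--dst_nents)
				break;
			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;
			dst_avail = sg_dma_len(dst_sg);
		}
		if (!src_avail) {
			if (!--src_nents)
				break;
			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;
			src_avail = sg_dma_len(src_sg);
		}
	}

	dma_async_issue_pending(chan);
	return 0;
}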
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
 	return &desc->tx_desc;
 }
 
-static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
-	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
-	unsigned int dst_nents, struct scatterlist *src_sg,
-	unsigned int src_nents, unsigned long flags)
-{
-	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
-						 dma_chan);
-	struct ccp_dma_desc *desc;
-
-	dev_dbg(chan->ccp->dev,
-		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
-		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
-
-	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
-			       flags);
-	if (!desc)
-		return NULL;
-
-	return &desc->tx_desc;
-}
-
 static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
 	struct dma_chan *dma_chan, unsigned long flags)
 {

@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 	dma_dev->directions = DMA_MEM_TO_MEM;
 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);
 	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
 
 	/* The DMA channels for this device can be set to public or private,

@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 
 	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
-	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
 	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
 	dma_dev->device_issue_pending = ccp_issue_pending;
 	dma_dev->device_tx_status = ccp_tx_status;
@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
-/**
- * atc_prep_dma_sg - prepare memory to memory scather-gather operation
- * @chan: the channel to prepare operation on
- * @dst_sg: destination scatterlist
- * @dst_nents: number of destination scatterlist entries
- * @src_sg: source scatterlist
- * @src_nents: number of source scatterlist entries
- * @flags: tx descriptor status flags
- */
-static struct dma_async_tx_descriptor *
-atc_prep_dma_sg(struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags)
-{
-	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
-	struct at_desc		*desc = NULL;
-	struct at_desc		*first = NULL;
-	struct at_desc		*prev = NULL;
-	unsigned int		src_width;
-	unsigned int		dst_width;
-	size_t			xfer_count;
-	u32			ctrla;
-	u32			ctrlb;
-	size_t			dst_len = 0, src_len = 0;
-	dma_addr_t		dst = 0, src = 0;
-	size_t			len = 0, total_len = 0;
-
-	if (unlikely(dst_nents == 0 || src_nents == 0))
-		return NULL;
-
-	if (unlikely(dst_sg == NULL || src_sg == NULL))
-		return NULL;
-
-	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
-		| ATC_SRC_ADDR_MODE_INCR
-		| ATC_DST_ADDR_MODE_INCR
-		| ATC_FC_MEM2MEM;
-
-	/*
-	 * loop until there is either no more source or no more destination
-	 * scatterlist entry
-	 */
-	while (true) {
-
-		/* prepare the next transfer */
-		if (dst_len == 0) {
-
-			/* no more destination scatterlist entries */
-			if (!dst_sg || !dst_nents)
-				break;
-
-			dst = sg_dma_address(dst_sg);
-			dst_len = sg_dma_len(dst_sg);
-
-			dst_sg = sg_next(dst_sg);
-			dst_nents--;
-		}
-
-		if (src_len == 0) {
-
-			/* no more source scatterlist entries */
-			if (!src_sg || !src_nents)
-				break;
-
-			src = sg_dma_address(src_sg);
-			src_len = sg_dma_len(src_sg);
-
-			src_sg = sg_next(src_sg);
-			src_nents--;
-		}
-
-		len = min_t(size_t, src_len, dst_len);
-		if (len == 0)
-			continue;
-
-		/* take care for the alignment */
-		src_width = dst_width = atc_get_xfer_width(src, dst, len);
-
-		ctrla = ATC_SRC_WIDTH(src_width) |
-			ATC_DST_WIDTH(dst_width);
-
-		/*
-		 * The number of transfers to set up refer to the source width
-		 * that depends on the alignment.
-		 */
-		xfer_count = len >> src_width;
-		if (xfer_count > ATC_BTSIZE_MAX) {
-			xfer_count = ATC_BTSIZE_MAX;
-			len = ATC_BTSIZE_MAX << src_width;
-		}
-
-		/* create the transfer */
-		desc = atc_desc_get(atchan);
-		if (!desc)
-			goto err_desc_get;
-
-		desc->lli.saddr = src;
-		desc->lli.daddr = dst;
-		desc->lli.ctrla = ctrla | xfer_count;
-		desc->lli.ctrlb = ctrlb;
-
-		desc->txd.cookie = 0;
-		desc->len = len;
-
-		atc_desc_chain(&first, &prev, desc);
-
-		/* update the lengths and addresses for the next loop cycle */
-		dst_len -= len;
-		src_len -= len;
-		dst += len;
-		src += len;
-
-		total_len += len;
-	}
-
-	/* First descriptor of the chain embedds additional information */
-	first->txd.cookie = -EBUSY;
-	first->total_len = total_len;
-
-	/* set end-of-link to the last link descriptor of list*/
-	set_desc_eol(desc);
-
-	first->txd.flags = flags; /* client is in control of this ack */
-
-	return &first->txd;
-
-err_desc_get:
-	atc_desc_put(atchan, first);
-	return NULL;
-}
-
 /**
  * atc_dma_cyclic_check_values
  * Check for too big/unaligned periods and unaligned DMA buffer

@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	/* setup platform data for each SoC */
 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
-	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
-	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
 	/* get DMA parameters from controller type */
 	plat_dat = at_dma_get_driver_data(pdev);

@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
-	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
-		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
-
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
 	  plat_dat->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
@@ -937,8 +937,6 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
-		!device->device_prep_dma_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
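The checks above encode a registration-time contract: every capability bit a driver sets in cap_mask must be paired with the matching prep callback, so removing the DMA_SG bit requires removing its BUG_ON pairing too. A minimal sketch of the contract from the driver side (the foo_* names are hypothetical):

#include <linux/dmaengine.h>

/* Hypothetical prep callback with the device_prep_dma_memcpy signature */
static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags);

/*
 * Setting DMA_MEMCPY in cap_mask obliges the driver to provide
 * device_prep_dma_memcpy before registering, or the corresponding
 * BUG_ON() fires inside dma_async_device_register().
 */
static int foo_dma_register(struct dma_device *dma_dev)
{
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_dev->device_prep_dma_memcpy = foo_prep_dma_memcpy;

	return dma_async_device_register(dma_dev);
}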
@@ -52,15 +52,10 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(iterations,
 		"Iterations before stopping test (default: infinite)");
 
-static unsigned int sg_buffers = 1;
-module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(sg_buffers,
-		"Number of scatter gather buffers (default: 1)");
-
 static unsigned int dmatest;
 module_param(dmatest, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dmatest,
-		"dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
+		"dmatest 0-memcpy 1-memset (default: 0)");
 
 static unsigned int xor_sources = 3;
 module_param(xor_sources, uint, S_IRUGO | S_IWUSR);

@@ -471,9 +466,6 @@ static int dmatest_func(void *data)
 		align = dev->fill_align;
 		src_cnt = dst_cnt = 1;
 		is_memset = true;
-	} else if (thread->type == DMA_SG) {
-		align = dev->copy_align;
-		src_cnt = dst_cnt = sg_buffers;
 	} else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
 		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);

@@ -553,8 +545,6 @@ static int dmatest_func(void *data)
 		dma_addr_t srcs[src_cnt];
 		dma_addr_t *dsts;
 		unsigned int src_off, dst_off, len;
-		struct scatterlist tx_sg[src_cnt];
-		struct scatterlist rx_sg[src_cnt];
 
 		total_tests++;
 

@@ -650,15 +640,6 @@ static int dmatest_func(void *data)
 			um->bidi_cnt++;
 		}
 
-		sg_init_table(tx_sg, src_cnt);
-		sg_init_table(rx_sg, src_cnt);
-		for (i = 0; i < src_cnt; i++) {
-			sg_dma_address(&rx_sg[i]) = srcs[i];
-			sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
-			sg_dma_len(&tx_sg[i]) = len;
-			sg_dma_len(&rx_sg[i]) = len;
-		}
-
 		if (thread->type == DMA_MEMCPY)
 			tx = dev->device_prep_dma_memcpy(chan,
 							 dsts[0] + dst_off,

@@ -668,9 +649,6 @@ static int dmatest_func(void *data)
 						dsts[0] + dst_off,
 						*(thread->srcs[0] + src_off),
 						len, flags);
-		else if (thread->type == DMA_SG)
-			tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
-						     rx_sg, src_cnt, flags);
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dsts[0] + dst_off,

@@ -853,8 +831,6 @@ static int dmatest_add_threads(struct dmatest_info *info,
 		op = "copy";
 	else if (type == DMA_MEMSET)
 		op = "set";
-	else if (type == DMA_SG)
-		op = "sg";
 	else if (type == DMA_XOR)
 		op = "xor";
 	else if (type == DMA_PQ)

@@ -916,15 +892,8 @@ static int dmatest_add_channel(struct dmatest_info *info,
 	}
 
 	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
-		if (dmatest == 2) {
-			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
-			thread_count += cnt > 0 ? cnt : 0;
-		}
-	}
-
-	if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
 		if (dmatest == 1) {
-			cnt = dmatest_add_threads(info, dtc, DMA_SG);
+			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
 			thread_count += cnt > 0 ? cnt : 0;
 		}
 	}

@@ -1002,7 +971,6 @@ static void run_threaded_test(struct dmatest_info *info)
 	request_channels(info, DMA_MEMCPY);
 	request_channels(info, DMA_MEMSET);
 	request_channels(info, DMA_XOR);
-	request_channels(info, DMA_SG);
 	request_channels(info, DMA_PQ);
 }
 
@@ -825,122 +825,6 @@ fsl_dma_prep_memcpy(struct dma_chan *dchan,
 	return NULL;
 }
 
-static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
-	struct scatterlist *dst_sg, unsigned int dst_nents,
-	struct scatterlist *src_sg, unsigned int src_nents,
-	unsigned long flags)
-{
-	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	size_t dst_avail, src_avail;
-	dma_addr_t dst, src;
-	size_t len;
-
-	/* basic sanity checks */
-	if (dst_nents == 0 || src_nents == 0)
-		return NULL;
-
-	if (dst_sg == NULL || src_sg == NULL)
-		return NULL;
-
-	/*
-	 * TODO: should we check that both scatterlists have the same
-	 * TODO: number of bytes in total? Is that really an error?
-	 */
-
-	/* get prepared for the loop */
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-
-	/* run until we are out of scatterlist entries */
-	while (true) {
-
-		/* create the largest transaction possible */
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
-		if (len == 0)
-			goto fetch;
-
-		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
-		/* allocate and populate the descriptor */
-		new = fsl_dma_alloc_descriptor(chan);
-		if (!new) {
-			chan_err(chan, "%s\n", msg_ld_oom);
-			goto fail;
-		}
-
-		set_desc_cnt(chan, &new->hw, len);
-		set_desc_src(chan, &new->hw, src);
-		set_desc_dst(chan, &new->hw, dst);
-
-		if (!first)
-			first = new;
-		else
-			set_desc_next(chan, &prev->hw, new->async_tx.phys);
-
-		new->async_tx.cookie = 0;
-		async_tx_ack(&new->async_tx);
-		prev = new;
-
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
-
-		/* update metadata */
-		dst_avail -= len;
-		src_avail -= len;
-
-fetch:
-		/* fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-
-			/* no more entries: we're done */
-			if (dst_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
-
-			dst_nents--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-
-		/* fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-
-			/* no more entries: we're done */
-			if (src_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
-
-			src_nents--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY;
-
-	/* Set End-of-link to the last link descriptor of new list */
-	set_ld_eol(chan, new);
-
-	return &first->async_tx;
-
-fail:
-	if (!first)
-		return NULL;
-
-	fsldma_free_desc_list_reverse(chan, &first->tx_list);
-	return NULL;
-}
-
 static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan;

@@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op)
 	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
-	dma_cap_set(DMA_SG, fdev->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
-	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_config = fsl_dma_device_config;
@@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
 	hw_desc->byte_count = byte_count;
 }
 
-/* Populate the descriptor */
-static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
-				     dma_addr_t dma_src, dma_addr_t dma_dst,
-				     u32 len, struct mv_xor_desc_slot *prev)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-
-	hw_desc->status = XOR_DESC_DMA_OWNED;
-	hw_desc->phy_next_desc = 0;
-	/* Configure for XOR with only one src address -> MEMCPY */
-	hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
-	hw_desc->phy_dest_addr = dma_dst;
-	hw_desc->phy_src_addr[0] = dma_src;
-	hw_desc->byte_count = len;
-
-	if (prev) {
-		struct mv_xor_desc *hw_prev = prev->hw_desc;
-
-		hw_prev->phy_next_desc = desc->async_tx.phys;
-	}
-}
-
-static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-
-	/* Enable end-of-descriptor interrupt */
-	hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
-}
-
 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;

@@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }
 
-/**
- * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
- * @chan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
-		   unsigned int dst_sg_len, struct scatterlist *src_sg,
-		   unsigned int src_sg_len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *new;
-	struct mv_xor_desc_slot *first = NULL;
-	struct mv_xor_desc_slot *prev = NULL;
-	size_t len, dst_avail, src_avail;
-	dma_addr_t dma_dst, dma_src;
-	int desc_cnt = 0;
-	int ret;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
-		__func__, dst_sg_len, src_sg_len, flags);
-
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-
-	/* Run until we are out of scatterlist entries */
-	while (true) {
-		/* Allocate and populate the descriptor */
-		desc_cnt++;
-		new = mv_chan_alloc_slot(mv_chan);
-		if (!new) {
-			dev_err(mv_chan_to_devp(mv_chan),
-				"Out of descriptors (desc_cnt=%d)!\n",
-				desc_cnt);
-			goto err;
-		}
-
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
-		if (len == 0)
-			goto fetch;
-
-		if (len < MV_XOR_MIN_BYTE_COUNT) {
-			dev_err(mv_chan_to_devp(mv_chan),
-				"Transfer size of %zu too small!\n", len);
-			goto err;
-		}
-
-		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
-			dst_avail;
-		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
-			src_avail;
-
-		/* Check if a new window needs to get added for 'dst' */
-		ret = mv_xor_add_io_win(mv_chan, dma_dst);
-		if (ret)
-			goto err;
-
-		/* Check if a new window needs to get added for 'src' */
-		ret = mv_xor_add_io_win(mv_chan, dma_src);
-		if (ret)
-			goto err;
-
-		/* Populate the descriptor */
-		mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
-		prev = new;
-		dst_avail -= len;
-		src_avail -= len;
-
-		if (!first)
-			first = new;
-		else
-			list_move_tail(&new->node, &first->sg_tx_list);
-
-fetch:
-		/* Fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-			if (dst_sg_len == 0)
-				break;
-
-			/* Fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
-
-			dst_sg_len--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-
-		/* Fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-			if (src_sg_len == 0)
-				break;
-
-			/* Fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
-
-			src_sg_len--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	/* Set the EOD flag in the last descriptor */
-	mv_xor_desc_config_eod(new);
-	first->async_tx.flags = flags;
-
-	return &first->async_tx;
-
-err:
-	/* Cleanup: Move all descriptors back into the free list */
-	spin_lock_bh(&mv_chan->lock);
-	mv_desc_clean_slot(first, mv_chan);
-	spin_unlock_bh(&mv_chan->lock);
-
-	return NULL;
-}
-
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

@@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;

@@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 			goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
-		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);

@@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 
 			dma_cap_zero(cap_mask);
 			dma_cap_set(DMA_MEMCPY, cap_mask);
-			dma_cap_set(DMA_SG, cap_mask);
 			dma_cap_set(DMA_XOR, cap_mask);
 			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
@@ -1005,21 +1005,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
 			    DMA_MEM_TO_MEM, flags);
 }
 
-static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
-	struct dma_chan *dchan,
-	struct scatterlist *dst_sg, unsigned int dst_nents,
-	struct scatterlist *src_sg, unsigned int src_nents,
-	unsigned long flags)
-{
-	struct nbpf_channel *chan = nbpf_to_chan(dchan);
-
-	if (dst_nents != src_nents)
-		return NULL;
-
-	return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
-			    DMA_MEM_TO_MEM, flags);
-}
-
 static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
 	enum dma_transfer_direction direction, unsigned long flags, void *context)

@@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);
 
 	/* Common and MEMCPY operations */
 	dma_dev->device_alloc_chan_resources
 		= nbpf_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
-	dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
 	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
 	dma_dev->device_tx_status = nbpf_tx_status;
 	dma_dev->device_issue_pending = nbpf_issue_pending;
@@ -2484,19 +2484,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 			   DMA_MEM_TO_MEM, dma_flags);
 }
 
-static struct dma_async_tx_descriptor *
-d40_prep_memcpy_sg(struct dma_chan *chan,
-		   struct scatterlist *dst_sg, unsigned int dst_nents,
-		   struct scatterlist *src_sg, unsigned int src_nents,
-		   unsigned long dma_flags)
-{
-	if (dst_nents != src_nents)
-		return NULL;
-
-	return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
-			   DMA_MEM_TO_MEM, dma_flags);
-}
-
 static struct dma_async_tx_descriptor *
 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		  unsigned int sg_len, enum dma_transfer_direction direction,

@@ -2821,9 +2808,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
 		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	}
 
-	if (dma_has_cap(DMA_SG, dev->cap_mask))
-		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
-
 	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
 		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
 

@@ -2865,7 +2849,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 
 	dma_cap_zero(base->dma_memcpy.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
 
 	d40_ops_init(base, &base->dma_memcpy);
 

@@ -2883,7 +2866,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_zero(base->dma_both.cap_mask);
 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
 	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
 
 	d40_ops_init(base, &base->dma_both);
@@ -425,48 +425,6 @@ static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
 				XGENE_DMA_DESC_HOENQ_NUM_POS);
 }
 
-static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
-				    struct xgene_dma_desc_sw *desc_sw,
-				    dma_addr_t dst, dma_addr_t src,
-				    size_t len)
-{
-	struct xgene_dma_desc_hw *desc1, *desc2;
-	int i;
-
-	/* Get 1st descriptor */
-	desc1 = &desc_sw->desc1;
-	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
-
-	/* Set destination address */
-	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
-	desc1->m3 |= cpu_to_le64(dst);
-
-	/* Set 1st source address */
-	xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
-
-	if (!len)
-		return;
-
-	/*
-	 * We need to split this source buffer,
-	 * and need to use 2nd descriptor
-	 */
-	desc2 = &desc_sw->desc2;
-	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
-
-	/* Set 2nd to 5th source address */
-	for (i = 0; i < 4 && len; i++)
-		xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
-					 &len, &src);
-
-	/* Invalidate unused source address field */
-	for (; i < 4; i++)
-		xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
-
-	/* Updated flag that we have prepared 64B descriptor */
-	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
-}
-
 static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
 				    struct xgene_dma_desc_sw *desc_sw,
 				    dma_addr_t *dst, dma_addr_t *src,

@@ -891,114 +849,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
 	chan->desc_pool = NULL;
 }
 
-static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
-	struct dma_chan *dchan, struct scatterlist *dst_sg,
-	u32 dst_nents, struct scatterlist *src_sg,
-	u32 src_nents, unsigned long flags)
-{
-	struct xgene_dma_desc_sw *first = NULL, *new = NULL;
-	struct xgene_dma_chan *chan;
-	size_t dst_avail, src_avail;
-	dma_addr_t dst, src;
-	size_t len;
-
-	if (unlikely(!dchan))
-		return NULL;
-
-	if (unlikely(!dst_nents || !src_nents))
-		return NULL;
-
-	if (unlikely(!dst_sg || !src_sg))
-		return NULL;
-
-	chan = to_dma_chan(dchan);
-
-	/* Get prepared for the loop */
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-	dst_nents--;
-	src_nents--;
-
-	/* Run until we are out of scatterlist entries */
-	while (true) {
-		/* Create the largest transaction possible */
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
-		if (len == 0)
-			goto fetch;
-
-		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
-		/* Allocate the link descriptor from DMA pool */
-		new = xgene_dma_alloc_descriptor(chan);
-		if (!new)
-			goto fail;
-
-		/* Prepare DMA descriptor */
-		xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
-
-		if (!first)
-			first = new;
-
-		new->tx.cookie = 0;
-		async_tx_ack(&new->tx);
-
-		/* update metadata */
-		dst_avail -= len;
-		src_avail -= len;
-
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
-
-fetch:
-		/* fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-			/* no more entries: we're done */
-			if (dst_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (!dst_sg)
-				break;
-
-			dst_nents--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-
-		/* fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-			/* no more entries: we're done */
-			if (src_nents == 0)
-				break;
-
-			/* fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (!src_sg)
-				break;
-
-			src_nents--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	if (!new)
-		return NULL;
-
-	new->tx.flags = flags; /* client is in control of this ack */
-	new->tx.cookie = -EBUSY;
-	list_splice(&first->tx_list, &new->tx_list);
-
-	return &new->tx;
-fail:
-	if (!first)
-		return NULL;
-
-	xgene_dma_free_desc_list(chan, &first->tx_list);
-	return NULL;
-}
-
 static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
 	struct dma_chan *dchan, dma_addr_t dst,	dma_addr_t *src,
 	u32 src_cnt, size_t len, unsigned long flags)

@@ -1653,7 +1503,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	dma_cap_zero(dma_dev->cap_mask);
 
 	/* Set DMA device capability */
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);
 
 	/* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
 	 * and channel 1 supports XOR, PQ both. First thing here is we have

@@ -1679,7 +1528,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
 	dma_dev->device_issue_pending = xgene_dma_issue_pending;
 	dma_dev->device_tx_status = xgene_dma_tx_status;
-	dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;

@@ -1731,8 +1579,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
 
 	/* DMA capability info */
 	dev_info(pdma->dev,
-		 "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
-		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
+		 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
 		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
 
@@ -829,98 +829,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 	return &first->async_tx;
 }
 
-/**
- * zynqmp_dma_prep_slave_sg - prepare descriptors for a memory sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
-			struct dma_chan *dchan, struct scatterlist *dst_sg,
-			unsigned int dst_sg_len, struct scatterlist *src_sg,
-			unsigned int src_sg_len, unsigned long flags)
-{
-	struct zynqmp_dma_desc_sw *new, *first = NULL;
-	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	void *desc = NULL, *prev = NULL;
-	size_t len, dst_avail, src_avail;
-	dma_addr_t dma_dst, dma_src;
-	u32 desc_cnt = 0, i;
-	struct scatterlist *sg;
-
-	for_each_sg(src_sg, sg, src_sg_len, i)
-		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
-					 ZYNQMP_DMA_MAX_TRANS_LEN);
-
-	spin_lock_bh(&chan->lock);
-	if (desc_cnt > chan->desc_free_cnt) {
-		spin_unlock_bh(&chan->lock);
-		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
-		return NULL;
-	}
-	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
-	spin_unlock_bh(&chan->lock);
-
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
-
-	/* Run until we are out of scatterlist entries */
-	while (true) {
-		/* Allocate and populate the descriptor */
-		new = zynqmp_dma_get_descriptor(chan);
-		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
-		if (len == 0)
-			goto fetch;
-		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
-			dst_avail;
-		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
-			src_avail;
-
-		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
-					     len, prev);
-		prev = desc;
-		dst_avail -= len;
-		src_avail -= len;
-
-		if (!first)
-			first = new;
-		else
-			list_add_tail(&new->node, &first->tx_list);
-fetch:
-		/* Fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
-			if (dst_sg_len == 0)
-				break;
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
-			dst_sg_len--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
-		/* Fetch the next src scatterlist entry */
-		if (src_avail == 0) {
-			if (src_sg_len == 0)
-				break;
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
-			src_sg_len--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
-
-	zynqmp_dma_desc_config_eod(chan, desc);
-	first->async_tx.flags = flags;
-	return &first->async_tx;
-}
-
 /**
  * zynqmp_dma_chan_remove - Channel remove function
  * @chan: ZynqMP DMA channel pointer

@@ -1064,11 +972,9 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&zdev->common.channels);
 
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
-	dma_cap_set(DMA_SG, zdev->common.cap_mask);
 	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
 
 	p = &zdev->common;
-	p->device_prep_dma_sg = zynqmp_dma_prep_sg;
 	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
 	p->device_terminate_all = zynqmp_dma_device_terminate_all;
 	p->device_issue_pending = zynqmp_dma_issue_pending;
@@ -68,7 +68,6 @@ enum dma_transaction_type {
 	DMA_MEMSET,
 	DMA_MEMSET_SG,
 	DMA_INTERRUPT,
-	DMA_SG,
 	DMA_PRIVATE,
 	DMA_ASYNC_TX,
 	DMA_SLAVE,

@@ -771,11 +770,6 @@ struct dma_device {
 		unsigned int nents, int value, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
-		struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags);
 
 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,

@@ -905,19 +899,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
 						    len, flags);
 }
 
-static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
-		struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags)
-{
-	if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
-		return NULL;
-
-	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
-			src_sg, src_nents, flags);
-}
-
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
  * @chan: The channel for which to terminate the transfers
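The removed dmaengine_prep_dma_sg() wrapper followed the same guard-then-dispatch pattern as the surviving helpers in this header. For comparison, a sketch of the memcpy counterpart, reconstructed to be consistent with the context lines at the top of the last hunk (not quoted verbatim from the tree):

static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/* Guard: the channel must exist and advertise the callback */
	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
		return NULL;

	/* Dispatch to the driver's prep hook */
	return chan->device->device_prep_dma_memcpy(chan, dest, src,
						    len, flags);
}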