cross-tree: phase out dma_zalloc_coherent()

We already need to zero out memory for dma_alloc_coherent(), as such
using dma_zalloc_coherent() is superfluous. Phase it out.

This change was generated with the following Coccinelle SmPL patch:

@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@

-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
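For context, dma_zalloc_coherent() was only a thin inline wrapper around dma_alloc_coherent(). A rough sketch of the old helper, paraphrased from memory of include/linux/dma-mapping.h and not part of this diff:

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	/* dma_alloc_coherent() already returns zeroed memory, so the
	 * explicit __GFP_ZERO no longer buys anything. */
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

Since the zeroing now happens unconditionally inside dma_alloc_coherent(), every caller can be converted mechanically; the SmPL rule above does exactly that when applied with something along the lines of "spatch --sp-file replace_dma_zalloc_coherent.cocci --in-place --dir ." (the .cocci file name here is illustrative).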
parent 3bd6e94bec
commit 750afb08ca

173 changed files with 915 additions and 949 deletions
@@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 	unsigned long flags;
 
 	ch->desc = 0;
-	ch->desc_base = dma_zalloc_coherent(ch->dev,
-				LTQ_DESC_NUM * LTQ_DESC_SIZE,
-				&ch->phys, GFP_ATOMIC);
+	ch->desc_base = dma_alloc_coherent(ch->dev,
+					   LTQ_DESC_NUM * LTQ_DESC_SIZE,
+					   &ch->phys, GFP_ATOMIC);
 
 	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
 
 	chan->ring_size = ring_size;
 
-	chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
+	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
 					     ring_size * sizeof(u64),
 					     &chan->ring_dma, GFP_KERNEL);
 
@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
 	}
 
 	/* Initialize outbound message descriptor ring */
-	rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
-				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-				&rmu->msg_tx_ring.phys, GFP_KERNEL);
+	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+						   rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+						   &rmu->msg_tx_ring.phys,
+						   GFP_KERNEL);
 	if (!rmu->msg_tx_ring.virt) {
 		rc = -ENOMEM;
 		goto out_dma;
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
 	if (!pp)
 		return -ENOMEM;
 
-	mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
-				  GFP_KERNEL);
+	mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+				 GFP_KERNEL);
 	if (!mem) {
 		kfree(pp);
 		return -ENOMEM;
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
 
 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-						 &he_dev->tpdrq_phys, GFP_KERNEL);
+	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+						&he_dev->tpdrq_phys,
+						GFP_KERNEL);
 	if (he_dev->tpdrq_base == NULL) {
 		hprintk("failed to alloc tpdrq\n");
 		return -ENOMEM;
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 		goto out_free_rbpl_virt;
 	}
 
-	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-						&he_dev->rbpl_phys, GFP_KERNEL);
+	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+					       &he_dev->rbpl_phys, GFP_KERNEL);
 	if (he_dev->rbpl_base == NULL) {
 		hprintk("failed to alloc rbpl_base\n");
 		goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* rx buffer ready queue */
 
-	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-						&he_dev->rbrq_phys, GFP_KERNEL);
+	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+					       &he_dev->rbrq_phys, GFP_KERNEL);
 	if (he_dev->rbrq_base == NULL) {
 		hprintk("failed to allocate rbrq\n");
 		goto out_free_rbpl;
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* tx buffer ready queue */
 
-	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-						&he_dev->tbrq_phys, GFP_KERNEL);
+	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+					       &he_dev->tbrq_phys, GFP_KERNEL);
 	if (he_dev->tbrq_base == NULL) {
 		hprintk("failed to allocate tbrq\n");
 		goto out_free_rbpq_base;
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
 		    end of the interrupt queue */
 
-	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					       (CONFIG_IRQ_SIZE + 1)
-					       * sizeof(struct he_irq),
-					       &he_dev->irq_phys,
-					       GFP_KERNEL);
+	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+					      &he_dev->irq_phys, GFP_KERNEL);
 	if (he_dev->irq_base == NULL) {
 		hprintk("failed to allocate irq\n");
 		return -ENOMEM;
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)
 
 	/* host status page */
 
-	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					  sizeof(struct he_hsp),
-					  &he_dev->hsp_phys, GFP_KERNEL);
+	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					 sizeof(struct he_hsp),
+					 &he_dev->hsp_phys, GFP_KERNEL);
 	if (he_dev->hsp == NULL) {
 		hprintk("failed to allocate host status page\n");
 		return -ENOMEM;
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
 	scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
 	if (!scq)
 		return NULL;
-	scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
-					&scq->paddr, GFP_KERNEL);
+	scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+				       &scq->paddr, GFP_KERNEL);
 	if (scq->base == NULL) {
 		kfree(scq);
 		return NULL;
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
 {
 	struct rsq_entry *rsqe;
 
-	card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
-					     &card->rsq.paddr, GFP_KERNEL);
+	card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+					    &card->rsq.paddr, GFP_KERNEL);
 	if (card->rsq.base == NULL) {
 		printk("%s: can't allocate RSQ.\n", card->name);
 		return -1;
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
 	writel(0, SAR_REG_GP);
 
 	/* Initialize RAW Cell Handle Register  */
-	card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
-						 2 * sizeof(u32),
-						 &card->raw_cell_paddr,
-						 GFP_KERNEL);
+	card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
+						2 * sizeof(u32),
+						&card->raw_cell_paddr,
+						GFP_KERNEL);
 	if (!card->raw_cell_hnd) {
 		printk("%s: memory allocation failure.\n", card->name);
 		deinit_card(card);
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
 		"comp pci_alloc, total bytes %zd entries %d\n",
 		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
 
-	skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
-				     &skdev->cq_dma_address, GFP_KERNEL);
+	skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+				    &skdev->cq_dma_address, GFP_KERNEL);
 
 	if (skcomp == NULL) {
 		rc = -ENOMEM;
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
  */
 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
 {
-	dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
-				       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
-				       &dev->gdr_pa, GFP_ATOMIC);
+	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+				      &dev->gdr_pa, GFP_ATOMIC);
 	if (!dev->gdr)
 		return -ENOMEM;
 
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
 	mcode->num_cores = is_ae ? 6 : 10;
 
 	/*  Allocate DMAable space */
-	mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
-					  &mcode->phys_base, GFP_KERNEL);
+	mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
+					 &mcode->phys_base, GFP_KERNEL);
 	if (!mcode->code) {
 		dev_err(dev, "Unable to allocate space for microcode");
 		ret = -ENOMEM;
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
 
 			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
 					rem_q_size;
-			curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
-					  c_size + CPT_NEXT_CHUNK_PTR_SIZE,
-					  &curr->dma_addr, GFP_KERNEL);
+			curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
+							      c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+							      &curr->dma_addr,
+							      GFP_KERNEL);
 			if (!curr->head) {
 				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
 					i, queue->nchunks);
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
 	struct nitrox_device *ndev = cmdq->ndev;
 
 	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
-	cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
-						 &cmdq->unalign_dma,
-						 GFP_KERNEL);
+	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+						&cmdq->unalign_dma,
+						GFP_KERNEL);
 	if (!cmdq->unalign_base)
 		return -ENOMEM;
 
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
 		/* Page alignment satisfies our needs for N <= 128 */
 		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
 		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
-		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
-						   &cmd_q->qbase_dma,
-						   GFP_KERNEL);
+		cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+						  &cmd_q->qbase_dma,
+						  GFP_KERNEL);
 		if (!cmd_q->qbase) {
 			dev_err(dev, "unable to allocate command queue\n");
 			ret = -ENOMEM;
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
 		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
 	} else {
 		/* new key */
-		ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
-					       &ctx->pkey, GFP_KERNEL);
+		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+					      &ctx->pkey, GFP_KERNEL);
 		if (!ctx->key) {
 			mutex_unlock(&ctx->lock);
 			return -ENOMEM;
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
 	struct sec_queue_ring_db *ring_db = &queue->ring_db;
 	int ret;
 
-	ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
-					      &ring_cmd->paddr,
-					      GFP_KERNEL);
+	ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+					     &ring_cmd->paddr, GFP_KERNEL);
 	if (!ring_cmd->vaddr)
 		return -ENOMEM;
 
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
 	mutex_init(&ring_cmd->lock);
 	ring_cmd->callback = sec_alg_callback;
 
-	ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
-					     &ring_cq->paddr,
-					     GFP_KERNEL);
+	ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+					    &ring_cq->paddr, GFP_KERNEL);
 	if (!ring_cq->vaddr) {
 		ret = -ENOMEM;
 		goto err_free_ring_cmd;
 	}
 
-	ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
-					     &ring_db->paddr,
-					     GFP_KERNEL);
+	ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+					    &ring_db->paddr, GFP_KERNEL);
 	if (!ring_db->vaddr) {
 		ret = -ENOMEM;
 		goto err_free_ring_cq;
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
 {
 	struct device *dev = &pdev->dev;
 	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-	crypt_virt = dma_zalloc_coherent(dev,
-					 NPE_QLEN * sizeof(struct crypt_ctl),
-					 &crypt_phys, GFP_ATOMIC);
+	crypt_virt = dma_alloc_coherent(dev,
+					NPE_QLEN * sizeof(struct crypt_ctl),
+					&crypt_phys, GFP_ATOMIC);
 	if (!crypt_virt)
 		return -ENOMEM;
 	return 0;
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
 		if (!ring[i])
 			goto err_cleanup;
 
-		ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
-					   MTK_DESC_RING_SZ,
-					   &ring[i]->cmd_dma,
-					   GFP_KERNEL);
+		ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
+						       MTK_DESC_RING_SZ,
+						       &ring[i]->cmd_dma,
+						       GFP_KERNEL);
 		if (!ring[i]->cmd_base)
 			goto err_cleanup;
 
-		ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
-					   MTK_DESC_RING_SZ,
-					   &ring[i]->res_dma,
-					   GFP_KERNEL);
+		ring[i]->res_base = dma_alloc_coherent(cryp->dev,
+						       MTK_DESC_RING_SZ,
+						       &ring[i]->res_dma,
+						       GFP_KERNEL);
 		if (!ring[i]->res_base)
 			goto err_cleanup;
 
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
 			     dev_to_node(&GET_DEV(accel_dev)));
 	if (!admin)
 		return -ENOMEM;
-	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-					       &admin->phy_addr, GFP_KERNEL);
+	admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+					      &admin->phy_addr, GFP_KERNEL);
 	if (!admin->virt_addr) {
 		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
 		kfree(admin);
 		return -ENOMEM;
 	}
 
-	admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
-						   PAGE_SIZE,
-						   &admin->const_tbl_addr,
-						   GFP_KERNEL);
+	admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+						  PAGE_SIZE,
+						  &admin->const_tbl_addr,
+						  GFP_KERNEL);
 	if (!admin->virt_tbl_addr) {
 		dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
 		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
 
 		dev = &GET_DEV(inst->accel_dev);
 		ctx->inst = inst;
-		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
-						  &ctx->enc_cd_paddr,
-						  GFP_ATOMIC);
+		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+						 &ctx->enc_cd_paddr,
+						 GFP_ATOMIC);
 		if (!ctx->enc_cd) {
 			return -ENOMEM;
 		}
-		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
-						  &ctx->dec_cd_paddr,
-						  GFP_ATOMIC);
+		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+						 &ctx->dec_cd_paddr,
+						 GFP_ATOMIC);
 		if (!ctx->dec_cd) {
 			goto out_free_enc;
 		}
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
 
 		dev = &GET_DEV(inst->accel_dev);
 		ctx->inst = inst;
-		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
-						  &ctx->enc_cd_paddr,
-						  GFP_ATOMIC);
+		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+						 &ctx->enc_cd_paddr,
+						 GFP_ATOMIC);
 		if (!ctx->enc_cd) {
 			spin_unlock(&ctx->lock);
 			return -ENOMEM;
 		}
-		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
-						  &ctx->dec_cd_paddr,
-						  GFP_ATOMIC);
+		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+						 &ctx->dec_cd_paddr,
+						 GFP_ATOMIC);
 		if (!ctx->dec_cd) {
 			spin_unlock(&ctx->lock);
 			goto out_free_enc;
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
 		} else {
 			int shift = ctx->p_size - req->src_len;
 
-			qat_req->src_align = dma_zalloc_coherent(dev,
-								 ctx->p_size,
-								 &qat_req->in.dh.in.b,
-								 GFP_KERNEL);
+			qat_req->src_align = dma_alloc_coherent(dev,
+								ctx->p_size,
+								&qat_req->in.dh.in.b,
+								GFP_KERNEL);
 			if (unlikely(!qat_req->src_align))
 				return ret;
 
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
 			goto unmap_src;
 
 	} else {
-		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
-							 &qat_req->out.dh.r,
-							 GFP_KERNEL);
+		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+							&qat_req->out.dh.r,
+							GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 	}
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 		return -EINVAL;
 
 	ctx->p_size = params->p_size;
-	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
 	if (!ctx->p)
 		return -ENOMEM;
 	memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 		return 0;
 	}
 
-	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
 	if (!ctx->g)
 		return -ENOMEM;
 	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	if (ret < 0)
 		goto err_clear_ctx;
 
-	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
-				      GFP_KERNEL);
+	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+				     GFP_KERNEL);
 	if (!ctx->xa) {
 		ret = -ENOMEM;
 		goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->in.rsa.enc.m,
-							 GFP_KERNEL);
+		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+							&qat_req->in.rsa.enc.m,
+							GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
 			goto unmap_src;
 
 	} else {
-		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->out.rsa.enc.c,
-							 GFP_KERNEL);
+		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+							&qat_req->out.rsa.enc.c,
+							GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->in.rsa.dec.c,
-							 GFP_KERNEL);
+		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+							&qat_req->in.rsa.dec.c,
+							GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
 			goto unmap_src;
 
 	} else {
-		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-							 &qat_req->out.rsa.dec.m,
-							 GFP_KERNEL);
+		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+							&qat_req->out.rsa.dec.m,
+							GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
 		goto err;
 
 	ret = -ENOMEM;
-	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
 	if (!ctx->n)
 		goto err;
 
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
 		return -EINVAL;
 	}
 
-	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
 	if (!ctx->e)
 		return -ENOMEM;
 
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
 		goto err;
 
 	ret = -ENOMEM;
-	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
 	if (!ctx->d)
 		goto err;
 
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto err;
-	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
 	if (!ctx->p)
 		goto err;
 	memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_p;
-	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
 	if (!ctx->q)
 		goto free_p;
 	memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_q;
-	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
-				      GFP_KERNEL);
+	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+				     GFP_KERNEL);
 	if (!ctx->dp)
 		goto free_q;
 	memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_dp;
-	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
-				      GFP_KERNEL);
+	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+				     GFP_KERNEL);
 	if (!ctx->dq)
 		goto free_dp;
 	memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_dq;
-	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
-					GFP_KERNEL);
+	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+				       GFP_KERNEL);
 	if (!ctx->qinv)
 		goto free_dq;
 	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 {
 	int ret = -EBUSY;
 
-	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
-					GFP_NOWAIT);
+	sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+				       GFP_NOWAIT);
 	if (!sdma->bd0) {
 		ret = -ENOMEM;
 		goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
 	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	int ret = 0;
 
-	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
-					GFP_NOWAIT);
+	desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
+				      GFP_NOWAIT);
 	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
 	 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
 	 */
 	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
-	ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
-					&ring->tphys, GFP_NOWAIT);
+	ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+				       &ring->tphys, GFP_NOWAIT);
 	if (!ring->txd)
 		return -ENOMEM;
 
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int ret;
 
-	mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
-					    CCW_BLOCK_SIZE,
-					    &mxs_chan->ccw_phys, GFP_KERNEL);
+	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+					   CCW_BLOCK_SIZE,
+					   &mxs_chan->ccw_phys, GFP_KERNEL);
 	if (!mxs_chan->ccw) {
 		ret = -ENOMEM;
 		goto err_alloc;
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
 	ring->size = ret;
 
 	/* Allocate memory for DMA ring descriptor */
-	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
-					       &ring->desc_paddr, GFP_KERNEL);
+	ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
+					      &ring->desc_paddr, GFP_KERNEL);
 	if (!ring->desc_vaddr) {
 		chan_err(chan, "Failed to allocate ring desc\n");
 		return -ENOMEM;
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 */
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		/* Allocate the buffer descriptors. */
-		chan->seg_v = dma_zalloc_coherent(chan->dev,
-						  sizeof(*chan->seg_v) *
-						  XILINX_DMA_NUM_DESCS,
-						  &chan->seg_p, GFP_KERNEL);
+		chan->seg_v = dma_alloc_coherent(chan->dev,
+						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+						 &chan->seg_p, GFP_KERNEL);
 		if (!chan->seg_v) {
 			dev_err(chan->dev,
 				"unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		 * so allocating a desc segment during channel allocation for
 		 * programming tail descriptor.
 		 */
-		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
-					sizeof(*chan->cyclic_seg_v),
-					&chan->cyclic_seg_p, GFP_KERNEL);
+		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+							sizeof(*chan->cyclic_seg_v),
+							&chan->cyclic_seg_p,
+							GFP_KERNEL);
 		if (!chan->cyclic_seg_v) {
 			dev_err(chan->dev,
 				"unable to allocate desc segment for cyclic DMA\n");
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 		list_add_tail(&desc->node, &chan->free_list);
 	}
 
-	chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
-				(2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
-				&chan->desc_pool_p, GFP_KERNEL);
+	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+					       (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+					       &chan->desc_pool_p, GFP_KERNEL);
 	if (!chan->desc_pool_v)
 		return -ENOMEM;
 
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 		return NULL;
 
 	dmah->size = size;
-	dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
-						GFP_KERNEL | __GFP_COMP);
+	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+					 &dmah->busaddr,
+					 GFP_KERNEL | __GFP_COMP);
 
 	if (dmah->vaddr == NULL) {
 		kfree(dmah);
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
 		return NULL;
 
 	sbuf->size = size;
-	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
-				       &sbuf->dma_addr, GFP_ATOMIC);
+	sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
+				      &sbuf->dma_addr, GFP_ATOMIC);
 	if (!sbuf->sb)
 		goto bail;
 
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 
 	if (!sghead) {
 		for (i = 0; i < pages; i++) {
-			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
-							     pbl->pg_size,
-							     &pbl->pg_map_arr[i],
-							     GFP_KERNEL);
+			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+							    pbl->pg_size,
+							    &pbl->pg_map_arr[i],
+							    GFP_KERNEL);
 			if (!pbl->pg_arr[i])
 				goto fail;
 			pbl->pg_count++;
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 	if (!wq->sq)
 		goto err3;
 
-	wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
-					     depth * sizeof(union t3_wr),
-					     &(wq->dma_addr), GFP_KERNEL);
+	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+				       depth * sizeof(union t3_wr),
+				       &(wq->dma_addr), GFP_KERNEL);
 	if (!wq->queue)
 		goto err4;
 
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
 	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
 		T4_RQT_ENTRY_SHIFT;
 
-	wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev,
-				       wq->memsize, &wq->dma_addr,
-			GFP_KERNEL);
+	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+				       &wq->dma_addr, GFP_KERNEL);
 	if (!wq->queue)
 		goto err_free_rqtpool;
 
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
 		goto done;
 
 	/* allocate dummy tail memory for all receive contexts */
-	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
-		&dd->pcidev->dev, sizeof(u64),
-		&dd->rcvhdrtail_dummy_dma,
-		GFP_KERNEL);
+	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+							 sizeof(u64),
+							 &dd->rcvhdrtail_dummy_dma,
+							 GFP_KERNEL);
 
 	if (!dd->rcvhdrtail_dummy_kvaddr) {
 		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 			gfp_flags = GFP_KERNEL;
 		else
 			gfp_flags = GFP_USER;
-		rcd->rcvhdrq = dma_zalloc_coherent(
-			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
-			gfp_flags | __GFP_COMP);
+		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+						  &rcd->rcvhdrq_dma,
+						  gfp_flags | __GFP_COMP);
 
 		if (!rcd->rcvhdrq) {
 			dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 
 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
 		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
-			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
-				&dd->pcidev->dev, PAGE_SIZE,
-				&rcd->rcvhdrqtailaddr_dma, gfp_flags);
+			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+								    PAGE_SIZE,
+								    &rcd->rcvhdrqtailaddr_dma,
+								    gfp_flags);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
 		}
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 	while (alloced_bytes < rcd->egrbufs.size &&
 	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
 		rcd->egrbufs.buffers[idx].addr =
-			dma_zalloc_coherent(&dd->pcidev->dev,
-					    rcd->egrbufs.rcvtid_size,
-					    &rcd->egrbufs.buffers[idx].dma,
-					    gfp_flags);
+			dma_alloc_coherent(&dd->pcidev->dev,
+					   rcd->egrbufs.rcvtid_size,
+					   &rcd->egrbufs.buffers[idx].dma,
+					   gfp_flags);
 		if (rcd->egrbufs.buffers[idx].addr) {
 			rcd->egrbufs.buffers[idx].len =
 				rcd->egrbufs.rcvtid_size;
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
 		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
 
 		set_dev_node(&dd->pcidev->dev, i);
-		dd->cr_base[i].va = dma_zalloc_coherent(
-					&dd->pcidev->dev,
-					bytes,
-					&dd->cr_base[i].dma,
-					GFP_KERNEL);
+		dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+						       bytes,
+						       &dd->cr_base[i].dma,
+						       GFP_KERNEL);
 		if (!dd->cr_base[i].va) {
 			set_dev_node(&dd->pcidev->dev, dd->node);
 			dd_dev_err(dd,
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		timer_setup(&sde->err_progress_check_timer,
 			    sdma_err_progress_check, 0);
 
-		sde->descq = dma_zalloc_coherent(
-			&dd->pcidev->dev,
-			descq_cnt * sizeof(u64[2]),
-			&sde->descq_phys,
-			GFP_KERNEL
-		);
+		sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+						descq_cnt * sizeof(u64[2]),
+						&sde->descq_phys, GFP_KERNEL);
 		if (!sde->descq)
 			goto bail;
 		sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 
 	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
 	/* Allocate memory for DMA of head registers to memory */
-	dd->sdma_heads_dma = dma_zalloc_coherent(
-		&dd->pcidev->dev,
-		dd->sdma_heads_size,
-		&dd->sdma_heads_phys,
-		GFP_KERNEL
-	);
+	dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+						dd->sdma_heads_size,
+						&dd->sdma_heads_phys,
+						GFP_KERNEL);
 	if (!dd->sdma_heads_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
 		goto bail;
 	}
 
 	/* Allocate memory for pad */
-	dd->sdma_pad_dma = dma_zalloc_coherent(
-		&dd->pcidev->dev,
-		sizeof(u32),
-		&dd->sdma_pad_phys,
-		GFP_KERNEL
-	);
+	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+					      &dd->sdma_pad_phys, GFP_KERNEL);
 	if (!dd->sdma_pad_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
 		goto bail;
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 		buf->npages = 1 << order;
 		buf->page_shift = page_shift;
 		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
-		buf->direct.buf = dma_zalloc_coherent(dev,
-						      size, &t, GFP_KERNEL);
+		buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+						     GFP_KERNEL);
 		if (!buf->direct.buf)
 			return -ENOMEM;
 
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->page_list[i].buf = dma_zalloc_coherent(dev,
-								  page_size, &t,
-								  GFP_KERNEL);
+			buf->page_list[i].buf = dma_alloc_coherent(dev,
+								   page_size,
+								   &t,
+								   GFP_KERNEL);
 
 			if (!buf->page_list[i].buf)
 				goto err_free;
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
 			}
-			eq->buf[i] = dma_zalloc_coherent(dev, size,
+			eq->buf[i] = dma_alloc_coherent(dev, size,
 							&(eq->buf_dma[i]),
 							GFP_KERNEL);
 			if (!eq->buf[i])
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 					size = (eq->entries - eqe_alloc)
 						* eq->eqe_size;
 				}
-				eq->buf[idx] = dma_zalloc_coherent(dev, size,
-							    &(eq->buf_dma[idx]),
-							    GFP_KERNEL);
+				eq->buf[idx] = dma_alloc_coherent(dev, size,
+								  &(eq->buf_dma[idx]),
+								  GFP_KERNEL);
 				if (!eq->buf[idx])
 					goto err_dma_alloc_buf;
 
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
 			goto free_cmd_mbox;
 		}
 
-		eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
+		eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
 						       &(eq->buf_list->map),
 						       GFP_KERNEL);
 		if (!eq->buf_list->buf) {
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
 	if (!mem)
 		return I40IW_ERR_PARAM;
 	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
-				      (dma_addr_t *)&mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
+				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
 	if (!mem->va)
 		return I40IW_ERR_NO_MEMORY;
 	return 0;
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	page = dev->db_tab->page + end;
 
 alloc:
-	page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
-					   &page->mapping, GFP_KERNEL);
+	page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+					  MTHCA_ICM_PAGE_SIZE, &page->mapping,
+					  GFP_KERNEL);
 	if (!page->db_rec) {
 		ret = -ENOMEM;
 		goto out;
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
 	q->len = len;
 	q->entry_size = entry_size;
 	q->size = len * entry_size;
-	q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
-				    &q->dma, GFP_KERNEL);
+	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+				   GFP_KERNEL);
 	if (!q->va)
 		return -ENOMEM;
 	return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 		return -ENOMEM;
 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
-	cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
 	if (!cq->va) {
 		status = -ENOMEM;
 		goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->sq.max_cnt = max_wqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->sq.va)
 		return -EINVAL;
 	qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->rq.max_cnt = max_rqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->rq.va)
 		return -ENOMEM;
 	qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 	if (dev->attr.ird == 0)
 		return 0;
 
-	qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
-					   GFP_KERNEL);
+	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+					  GFP_KERNEL);
 	if (!qp->ird_q_va)
 		return -ENOMEM;
 	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
|  | @ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev) | ||||||
| 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), | 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), | ||||||
| 			sizeof(struct ocrdma_rdma_stats_resp)); | 			sizeof(struct ocrdma_rdma_stats_resp)); | ||||||
| 
 | 
 | ||||||
| 	mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, | 	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, | ||||||
| 				      &mem->pa, GFP_KERNEL); | 				     &mem->pa, GFP_KERNEL); | ||||||
| 	if (!mem->va) { | 	if (!mem->va) { | ||||||
| 		pr_err("%s: stats mbox allocation failed\n", __func__); | 		pr_err("%s: stats mbox allocation failed\n", __func__); | ||||||
| 		return false; | 		return false; | ||||||
|  |  | ||||||
|  | @ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev, | ||||||
| 	INIT_LIST_HEAD(&ctx->mm_head); | 	INIT_LIST_HEAD(&ctx->mm_head); | ||||||
| 	mutex_init(&ctx->mm_list_lock); | 	mutex_init(&ctx->mm_list_lock); | ||||||
| 
 | 
 | ||||||
| 	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, | 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len, | ||||||
| 					     &ctx->ah_tbl.pa, GFP_KERNEL); | 					    &ctx->ah_tbl.pa, GFP_KERNEL); | ||||||
| 	if (!ctx->ah_tbl.va) { | 	if (!ctx->ah_tbl.va) { | ||||||
| 		kfree(ctx); | 		kfree(ctx); | ||||||
| 		return ERR_PTR(-ENOMEM); | 		return ERR_PTR(-ENOMEM); | ||||||
|  | @ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < mr->num_pbls; i++) { | 	for (i = 0; i < mr->num_pbls; i++) { | ||||||
| 		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); | 		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); | ||||||
| 		if (!va) { | 		if (!va) { | ||||||
| 			ocrdma_free_mr_pbl_tbl(dev, mr); | 			ocrdma_free_mr_pbl_tbl(dev, mr); | ||||||
| 			status = -ENOMEM; | 			status = -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev, | ||||||
| 		return ERR_PTR(-ENOMEM); | 		return ERR_PTR(-ENOMEM); | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < pbl_info->num_pbls; i++) { | 	for (i = 0; i < pbl_info->num_pbls; i++) { | ||||||
| 		va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, | 		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa, | ||||||
| 					 &pa, flags); | 					flags); | ||||||
| 		if (!va) | 		if (!va) | ||||||
| 			goto err; | 			goto err; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, | ||||||
| 	dev_info(&pdev->dev, "device version %d, driver version %d\n", | 	dev_info(&pdev->dev, "device version %d, driver version %d\n", | ||||||
| 		 dev->dsr_version, PVRDMA_VERSION); | 		 dev->dsr_version, PVRDMA_VERSION); | ||||||
| 
 | 
 | ||||||
| 	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), | 	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr), | ||||||
| 				       &dev->dsrbase, GFP_KERNEL); | 				      &dev->dsrbase, GFP_KERNEL); | ||||||
| 	if (!dev->dsr) { | 	if (!dev->dsr) { | ||||||
| 		dev_err(&pdev->dev, "failed to allocate shared region\n"); | 		dev_err(&pdev->dev, "failed to allocate shared region\n"); | ||||||
| 		ret = -ENOMEM; | 		ret = -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 	ts->pdev = pdev; | 	ts->pdev = pdev; | ||||||
| 
 | 
 | ||||||
| 	ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, | 	ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, | ||||||
| 					     GFP_KERNEL); | 					    GFP_KERNEL); | ||||||
| 	if (!ts->fw_regs_va) { | 	if (!ts->fw_regs_va) { | ||||||
| 		dev_err(dev, "failed to dma_alloc_coherent\n"); | 		dev_err(dev, "failed to dma_alloc_coherent\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) | ||||||
| 
 | 
 | ||||||
| 	spin_lock_init(&dom->pgtlock); | 	spin_lock_init(&dom->pgtlock); | ||||||
| 
 | 
 | ||||||
| 	dom->pgt_va = dma_zalloc_coherent(data->dev, | 	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE, | ||||||
| 				M2701_IOMMU_PGT_SIZE, | 					 &dom->pgt_pa, GFP_KERNEL); | ||||||
| 				&dom->pgt_pa, GFP_KERNEL); |  | ||||||
| 	if (!dom->pgt_va) | 	if (!dom->pgt_va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q) | ||||||
| { | { | ||||||
| 	struct device *dev = &cio2->pci_dev->dev; | 	struct device *dev = &cio2->pci_dev->dev; | ||||||
| 
 | 
 | ||||||
| 	q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, | 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, | ||||||
| 				      GFP_KERNEL); | 				     GFP_KERNEL); | ||||||
| 	if (!q->fbpt) | 	if (!q->fbpt) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data, | ||||||
| 	struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; | 	struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; | ||||||
| 	struct device *dev = &ctx->dev->plat_dev->dev; | 	struct device *dev = &ctx->dev->plat_dev->dev; | ||||||
| 
 | 
 | ||||||
| 	mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); | 	mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); | ||||||
| 	if (!mem->va) { | 	if (!mem->va) { | ||||||
| 		mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), | 		mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), | ||||||
| 			     size); | 			     size); | ||||||
|  |  | ||||||
|  | @ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, | ||||||
| 	if (get_order(size) >= MAX_ORDER) | 	if (get_order(size) >= MAX_ORDER) | ||||||
| 		return NULL; | 		return NULL; | ||||||
| 
 | 
 | ||||||
| 	return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, | 	return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle, | ||||||
| 				   GFP_KERNEL); | 				  GFP_KERNEL); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, | void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, | ||||||
|  |  | ||||||
|  | @ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host) | ||||||
| 		 * Use zalloc to zero the reserved high 32-bits of 128-bit | 		 * Use zalloc to zero the reserved high 32-bits of 128-bit | ||||||
| 		 * descriptors so that they never need to be written. | 		 * descriptors so that they never need to be written. | ||||||
| 		 */ | 		 */ | ||||||
| 		buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + | 		buf = dma_alloc_coherent(mmc_dev(mmc), | ||||||
| 					 host->adma_table_sz, &dma, GFP_KERNEL); | 					 host->align_buffer_sz + host->adma_table_sz, | ||||||
|  | 					 &dma, GFP_KERNEL); | ||||||
| 		if (!buf) { | 		if (!buf) { | ||||||
| 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", | 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", | ||||||
| 				mmc_hostname(mmc)); | 				mmc_hostname(mmc)); | ||||||
|  |  | ||||||
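A note on the sdhci hunk above: the in-code comment still says "Use zalloc", even though the call is now dma_alloc_coherent(). That remains correct, because dma_alloc_coherent() returns zeroed memory on current kernels, so the reserved high 32 bits of the 128-bit descriptors start out cleared without an extra memset(). A minimal illustrative sketch, not taken from the patch (my_dev, my_align_sz and my_table_sz are made-up names):

	#include <linux/dma-mapping.h>

	/* Sketch only: the returned buffer is already zero-filled, so callers
	 * that used to rely on dma_zalloc_coherent() need no memset(). */
	static void *alloc_adma_buffers(struct device *my_dev, size_t my_align_sz,
					size_t my_table_sz, dma_addr_t *dma)
	{
		return dma_alloc_coherent(my_dev, my_align_sz + my_table_sz,
					  dma, GFP_KERNEL);
	}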
|  | @ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	/* Allocate TX descriptor ring in coherent memory */ | 	/* Allocate TX descriptor ring in coherent memory */ | ||||||
| 	greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, | 	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, | ||||||
| 						&greth->tx_bd_base_phys, | 					       &greth->tx_bd_base_phys, | ||||||
| 						GFP_KERNEL); | 					       GFP_KERNEL); | ||||||
| 	if (!greth->tx_bd_base) { | 	if (!greth->tx_bd_base) { | ||||||
| 		err = -ENOMEM; | 		err = -ENOMEM; | ||||||
| 		goto error3; | 		goto error3; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	/* Allocate RX descriptor ring in coherent memory */ | 	/* Allocate RX descriptor ring in coherent memory */ | ||||||
| 	greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, | 	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, | ||||||
| 						&greth->rx_bd_base_phys, | 					       &greth->rx_bd_base_phys, | ||||||
| 						GFP_KERNEL); | 					       GFP_KERNEL); | ||||||
| 	if (!greth->rx_bd_base) { | 	if (!greth->rx_bd_base) { | ||||||
| 		err = -ENOMEM; | 		err = -ENOMEM; | ||||||
| 		goto error4; | 		goto error4; | ||||||
|  |  | ||||||
|  | @ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev) | ||||||
| 	size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; | 	size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { | 	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { | ||||||
| 		descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, | 		descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 		if (!descs) { | 		if (!descs) { | ||||||
| 			netdev_err(sdev->netdev, | 			netdev_err(sdev->netdev, | ||||||
| 				   "failed to allocate status descriptors\n"); | 				   "failed to allocate status descriptors\n"); | ||||||
|  | @ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev) | ||||||
| 	struct slic_shmem_data *sm_data; | 	struct slic_shmem_data *sm_data; | ||||||
| 	dma_addr_t paddr; | 	dma_addr_t paddr; | ||||||
| 
 | 
 | ||||||
| 	sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), | 	sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), | ||||||
| 				      &paddr, GFP_KERNEL); | 				     &paddr, GFP_KERNEL); | ||||||
| 	if (!sm_data) { | 	if (!sm_data) { | ||||||
| 		dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); | 		dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev) | ||||||
| 	int err = 0; | 	int err = 0; | ||||||
| 	u8 *mac[2]; | 	u8 *mac[2]; | ||||||
| 
 | 
 | ||||||
| 	eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, | 	eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, | ||||||
| 				     &paddr, GFP_KERNEL); | 				    &paddr, GFP_KERNEL); | ||||||
| 	if (!eeprom) | 	if (!eeprom) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) | ||||||
| 	struct ena_com_admin_sq *sq = &queue->sq; | 	struct ena_com_admin_sq *sq = &queue->sq; | ||||||
| 	u16 size = ADMIN_SQ_SIZE(queue->q_depth); | 	u16 size = ADMIN_SQ_SIZE(queue->q_depth); | ||||||
| 
 | 
 | ||||||
| 	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, | 	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, | ||||||
| 					  GFP_KERNEL); | 					 GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!sq->entries) { | 	if (!sq->entries) { | ||||||
| 		pr_err("memory allocation failed"); | 		pr_err("memory allocation failed"); | ||||||
|  | @ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) | ||||||
| 	struct ena_com_admin_cq *cq = &queue->cq; | 	struct ena_com_admin_cq *cq = &queue->cq; | ||||||
| 	u16 size = ADMIN_CQ_SIZE(queue->q_depth); | 	u16 size = ADMIN_CQ_SIZE(queue->q_depth); | ||||||
| 
 | 
 | ||||||
| 	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, | 	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, | ||||||
| 					  GFP_KERNEL); | 					 GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!cq->entries) { | 	if (!cq->entries) { | ||||||
| 		pr_err("memory allocation failed"); | 		pr_err("memory allocation failed"); | ||||||
|  | @ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev, | ||||||
| 
 | 
 | ||||||
| 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; | 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; | ||||||
| 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); | 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); | ||||||
| 	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, | 	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!aenq->entries) { | 	if (!aenq->entries) { | ||||||
| 		pr_err("memory allocation failed"); | 		pr_err("memory allocation failed"); | ||||||
|  | @ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, | ||||||
| 		dev_node = dev_to_node(ena_dev->dmadev); | 		dev_node = dev_to_node(ena_dev->dmadev); | ||||||
| 		set_dev_node(ena_dev->dmadev, ctx->numa_node); | 		set_dev_node(ena_dev->dmadev, ctx->numa_node); | ||||||
| 		io_sq->desc_addr.virt_addr = | 		io_sq->desc_addr.virt_addr = | ||||||
| 			dma_zalloc_coherent(ena_dev->dmadev, size, | 			dma_alloc_coherent(ena_dev->dmadev, size, | ||||||
| 					    &io_sq->desc_addr.phys_addr, | 					   &io_sq->desc_addr.phys_addr, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 		set_dev_node(ena_dev->dmadev, dev_node); | 		set_dev_node(ena_dev->dmadev, dev_node); | ||||||
| 		if (!io_sq->desc_addr.virt_addr) { | 		if (!io_sq->desc_addr.virt_addr) { | ||||||
| 			io_sq->desc_addr.virt_addr = | 			io_sq->desc_addr.virt_addr = | ||||||
| 				dma_zalloc_coherent(ena_dev->dmadev, size, | 				dma_alloc_coherent(ena_dev->dmadev, size, | ||||||
| 						    &io_sq->desc_addr.phys_addr, | 						   &io_sq->desc_addr.phys_addr, | ||||||
| 						    GFP_KERNEL); | 						   GFP_KERNEL); | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if (!io_sq->desc_addr.virt_addr) { | 		if (!io_sq->desc_addr.virt_addr) { | ||||||
|  | @ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, | ||||||
| 	prev_node = dev_to_node(ena_dev->dmadev); | 	prev_node = dev_to_node(ena_dev->dmadev); | ||||||
| 	set_dev_node(ena_dev->dmadev, ctx->numa_node); | 	set_dev_node(ena_dev->dmadev, ctx->numa_node); | ||||||
| 	io_cq->cdesc_addr.virt_addr = | 	io_cq->cdesc_addr.virt_addr = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, size, | 		dma_alloc_coherent(ena_dev->dmadev, size, | ||||||
| 				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); | 				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); | ||||||
| 	set_dev_node(ena_dev->dmadev, prev_node); | 	set_dev_node(ena_dev->dmadev, prev_node); | ||||||
| 	if (!io_cq->cdesc_addr.virt_addr) { | 	if (!io_cq->cdesc_addr.virt_addr) { | ||||||
| 		io_cq->cdesc_addr.virt_addr = | 		io_cq->cdesc_addr.virt_addr = | ||||||
| 			dma_zalloc_coherent(ena_dev->dmadev, size, | 			dma_alloc_coherent(ena_dev->dmadev, size, | ||||||
| 					    &io_cq->cdesc_addr.phys_addr, | 					   &io_cq->cdesc_addr.phys_addr, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if (!io_cq->cdesc_addr.virt_addr) { | 	if (!io_cq->cdesc_addr.virt_addr) { | ||||||
|  | @ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) | ||||||
| 	struct ena_rss *rss = &ena_dev->rss; | 	struct ena_rss *rss = &ena_dev->rss; | ||||||
| 
 | 
 | ||||||
| 	rss->hash_key = | 	rss->hash_key = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), | 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), | ||||||
| 				    &rss->hash_key_dma_addr, GFP_KERNEL); | 				   &rss->hash_key_dma_addr, GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (unlikely(!rss->hash_key)) | 	if (unlikely(!rss->hash_key)) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) | ||||||
| 	struct ena_rss *rss = &ena_dev->rss; | 	struct ena_rss *rss = &ena_dev->rss; | ||||||
| 
 | 
 | ||||||
| 	rss->hash_ctrl = | 	rss->hash_ctrl = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), | 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), | ||||||
| 				    &rss->hash_ctrl_dma_addr, GFP_KERNEL); | 				   &rss->hash_ctrl_dma_addr, GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (unlikely(!rss->hash_ctrl)) | 	if (unlikely(!rss->hash_ctrl)) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, | ||||||
| 		sizeof(struct ena_admin_rss_ind_table_entry); | 		sizeof(struct ena_admin_rss_ind_table_entry); | ||||||
| 
 | 
 | ||||||
| 	rss->rss_ind_tbl = | 	rss->rss_ind_tbl = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, tbl_size, | 		dma_alloc_coherent(ena_dev->dmadev, tbl_size, | ||||||
| 				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); | 				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); | ||||||
| 	if (unlikely(!rss->rss_ind_tbl)) | 	if (unlikely(!rss->rss_ind_tbl)) | ||||||
| 		goto mem_err1; | 		goto mem_err1; | ||||||
| 
 | 
 | ||||||
|  | @ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) | ||||||
| 
 | 
 | ||||||
| 	spin_lock_init(&mmio_read->lock); | 	spin_lock_init(&mmio_read->lock); | ||||||
| 	mmio_read->read_resp = | 	mmio_read->read_resp = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, | 		dma_alloc_coherent(ena_dev->dmadev, | ||||||
| 				    sizeof(*mmio_read->read_resp), | 				   sizeof(*mmio_read->read_resp), | ||||||
| 				    &mmio_read->read_resp_dma_addr, GFP_KERNEL); | 				   &mmio_read->read_resp_dma_addr, GFP_KERNEL); | ||||||
| 	if (unlikely(!mmio_read->read_resp)) | 	if (unlikely(!mmio_read->read_resp)) | ||||||
| 		goto err; | 		goto err; | ||||||
| 
 | 
 | ||||||
|  | @ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) | ||||||
| 	struct ena_host_attribute *host_attr = &ena_dev->host_attr; | 	struct ena_host_attribute *host_attr = &ena_dev->host_attr; | ||||||
| 
 | 
 | ||||||
| 	host_attr->host_info = | 	host_attr->host_info = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, | 		dma_alloc_coherent(ena_dev->dmadev, SZ_4K, | ||||||
| 				    &host_attr->host_info_dma_addr, GFP_KERNEL); | 				   &host_attr->host_info_dma_addr, GFP_KERNEL); | ||||||
| 	if (unlikely(!host_attr->host_info)) | 	if (unlikely(!host_attr->host_info)) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, | ||||||
| 	struct ena_host_attribute *host_attr = &ena_dev->host_attr; | 	struct ena_host_attribute *host_attr = &ena_dev->host_attr; | ||||||
| 
 | 
 | ||||||
| 	host_attr->debug_area_virt_addr = | 	host_attr->debug_area_virt_addr = | ||||||
| 		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, | 		dma_alloc_coherent(ena_dev->dmadev, debug_area_size, | ||||||
| 				    &host_attr->debug_area_dma_addr, GFP_KERNEL); | 				   &host_attr->debug_area_dma_addr, | ||||||
|  | 				   GFP_KERNEL); | ||||||
| 	if (unlikely(!host_attr->debug_area_virt_addr)) { | 	if (unlikely(!host_attr->debug_area_virt_addr)) { | ||||||
| 		host_attr->debug_area_size = 0; | 		host_attr->debug_area_size = 0; | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
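The ena_com hunks above keep the driver's two-step placement intact: the allocation is first attempted with the device temporarily bound to the queue's preferred NUMA node via set_dev_node(), and retried without that hint if it fails. A minimal sketch of that pattern with the renamed allocator (function and parameter names are illustrative, not taken from the driver):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Sketch only: try the preferred NUMA node first, then fall back. */
	static void *alloc_coherent_on_node(struct device *dev, size_t size,
					    dma_addr_t *dma, int preferred_node)
	{
		int prev_node = dev_to_node(dev);
		void *virt;

		set_dev_node(dev, preferred_node);
		virt = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
		set_dev_node(dev, prev_node);

		if (!virt)	/* retry with the default node */
			virt = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
		return virt;
	}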
|  | @ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	/* Packet buffers should be 64B aligned */ | 	/* Packet buffers should be 64B aligned */ | ||||||
| 	pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, | 	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, | ||||||
| 				      GFP_ATOMIC); | 				     GFP_ATOMIC); | ||||||
| 	if (unlikely(!pkt_buf)) { | 	if (unlikely(!pkt_buf)) { | ||||||
| 		dev_kfree_skb_any(skb); | 		dev_kfree_skb_any(skb); | ||||||
| 		return NETDEV_TX_OK; | 		return NETDEV_TX_OK; | ||||||
|  | @ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev) | ||||||
| 	ring->ndev = ndev; | 	ring->ndev = ndev; | ||||||
| 
 | 
 | ||||||
| 	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; | 	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; | ||||||
| 	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, | 	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, | ||||||
| 					      GFP_KERNEL); | 					     GFP_KERNEL); | ||||||
| 	if (!ring->desc_addr) | 	if (!ring->desc_addr) | ||||||
| 		goto err; | 		goto err; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
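The xge hunk above carries a comment that packet buffers must be 64B aligned; that assumption is unaffected by the rename, since dma_alloc_coherent() aligns the returned buffer to at least the smallest page order covering the requested size. A hedged sketch, not from the patch, that makes the assumption explicit (helper and parameter names are invented):

	#include <linux/kernel.h>
	#include <linux/dma-mapping.h>

	/* Sketch only: allocate a packet buffer and sanity-check the 64B
	 * alignment the driver relies on. */
	static void *alloc_pkt_buf_checked(struct device *dev, size_t size,
					   dma_addr_t *dma_addr)
	{
		void *pkt_buf = dma_alloc_coherent(dev, size, dma_addr, GFP_ATOMIC);

		if (pkt_buf)
			WARN_ON_ONCE(!IS_ALIGNED(*dma_addr, 64));
		return pkt_buf;
	}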
|  | @ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx) | ||||||
| 			    alx->num_txq + | 			    alx->num_txq + | ||||||
| 			    sizeof(struct alx_rrd) * alx->rx_ringsz + | 			    sizeof(struct alx_rrd) * alx->rx_ringsz + | ||||||
| 			    sizeof(struct alx_rfd) * alx->rx_ringsz; | 			    sizeof(struct alx_rfd) * alx->rx_ringsz; | ||||||
| 	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, | 	alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, | ||||||
| 						alx->descmem.size, | 					       alx->descmem.size, | ||||||
| 						&alx->descmem.dma, | 					       &alx->descmem.dma, GFP_KERNEL); | ||||||
| 						GFP_KERNEL); |  | ||||||
| 	if (!alx->descmem.virt) | 	if (!alx->descmem.virt) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) | ||||||
| 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count + | 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count + | ||||||
| 		8 * 4; | 		8 * 4; | ||||||
| 
 | 
 | ||||||
| 	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, | 	ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, | ||||||
| 						&ring_header->dma, GFP_KERNEL); | 					       &ring_header->dma, GFP_KERNEL); | ||||||
| 	if (unlikely(!ring_header->desc)) { | 	if (unlikely(!ring_header->desc)) { | ||||||
| 		dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); | 		dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); | ||||||
| 		goto err_nomem; | 		goto err_nomem; | ||||||
|  |  | ||||||
|  | @ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev) | ||||||
| 
 | 
 | ||||||
| 	/* allocate rx dma ring */ | 	/* allocate rx dma ring */ | ||||||
| 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); | 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); | ||||||
| 	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); | 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); | ||||||
| 	if (!p) { | 	if (!p) { | ||||||
| 		ret = -ENOMEM; | 		ret = -ENOMEM; | ||||||
| 		goto out_freeirq_tx; | 		goto out_freeirq_tx; | ||||||
|  | @ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev) | ||||||
| 
 | 
 | ||||||
| 	/* allocate tx dma ring */ | 	/* allocate tx dma ring */ | ||||||
| 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); | 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); | ||||||
| 	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); | 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); | ||||||
| 	if (!p) { | 	if (!p) { | ||||||
| 		ret = -ENOMEM; | 		ret = -ENOMEM; | ||||||
| 		goto out_free_rx_ring; | 		goto out_free_rx_ring; | ||||||
|  | @ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev) | ||||||
| 
 | 
 | ||||||
| 	/* allocate rx dma ring */ | 	/* allocate rx dma ring */ | ||||||
| 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); | 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); | ||||||
| 	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); | 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); | ||||||
| 	if (!p) { | 	if (!p) { | ||||||
| 		dev_err(kdev, "cannot allocate rx ring %u\n", size); | 		dev_err(kdev, "cannot allocate rx ring %u\n", size); | ||||||
| 		ret = -ENOMEM; | 		ret = -ENOMEM; | ||||||
|  | @ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev) | ||||||
| 
 | 
 | ||||||
| 	/* allocate tx dma ring */ | 	/* allocate tx dma ring */ | ||||||
| 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); | 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); | ||||||
| 	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); | 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); | ||||||
| 	if (!p) { | 	if (!p) { | ||||||
| 		dev_err(kdev, "cannot allocate tx ring\n"); | 		dev_err(kdev, "cannot allocate tx ring\n"); | ||||||
| 		ret = -ENOMEM; | 		ret = -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -1506,8 +1506,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, | ||||||
| 	/* We just need one DMA descriptor which is DMA-able, since writing to | 	/* We just need one DMA descriptor which is DMA-able, since writing to | ||||||
| 	 * the port will allocate a new descriptor in its internal linked-list | 	 * the port will allocate a new descriptor in its internal linked-list | ||||||
| 	 */ | 	 */ | ||||||
| 	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, | 	p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, | ||||||
| 				GFP_KERNEL); | 			       GFP_KERNEL); | ||||||
| 	if (!p) { | 	if (!p) { | ||||||
| 		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); | 		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | ||||||
| 
 | 
 | ||||||
| 		/* Alloc ring of descriptors */ | 		/* Alloc ring of descriptors */ | ||||||
| 		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); | 		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); | ||||||
| 		ring->cpu_base = dma_zalloc_coherent(dma_dev, size, | 		ring->cpu_base = dma_alloc_coherent(dma_dev, size, | ||||||
| 						     &ring->dma_base, | 						    &ring->dma_base, | ||||||
| 						     GFP_KERNEL); | 						    GFP_KERNEL); | ||||||
| 		if (!ring->cpu_base) { | 		if (!ring->cpu_base) { | ||||||
| 			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", | 			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", | ||||||
| 				ring->mmio_base); | 				ring->mmio_base); | ||||||
|  | @ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | ||||||
| 
 | 
 | ||||||
| 		/* Alloc ring of descriptors */ | 		/* Alloc ring of descriptors */ | ||||||
| 		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); | 		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); | ||||||
| 		ring->cpu_base = dma_zalloc_coherent(dma_dev, size, | 		ring->cpu_base = dma_alloc_coherent(dma_dev, size, | ||||||
| 						     &ring->dma_base, | 						    &ring->dma_base, | ||||||
| 						     GFP_KERNEL); | 						    GFP_KERNEL); | ||||||
| 		if (!ring->cpu_base) { | 		if (!ring->cpu_base) { | ||||||
| 			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", | 			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", | ||||||
| 				ring->mmio_base); | 				ring->mmio_base); | ||||||
|  |  | ||||||
|  | @ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev) | ||||||
| 						 BNX2_SBLK_MSIX_ALIGN_SIZE); | 						 BNX2_SBLK_MSIX_ALIGN_SIZE); | ||||||
| 	bp->status_stats_size = status_blk_size + | 	bp->status_stats_size = status_blk_size + | ||||||
| 				sizeof(struct statistics_block); | 				sizeof(struct statistics_block); | ||||||
| 	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, | 	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, | ||||||
| 					 &bp->status_blk_mapping, GFP_KERNEL); | 					&bp->status_blk_mapping, GFP_KERNEL); | ||||||
| 	if (!status_blk) | 	if (!status_blk) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -3449,10 +3449,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) | ||||||
| 			goto alloc_tx_ext_stats; | 			goto alloc_tx_ext_stats; | ||||||
| 
 | 
 | ||||||
| 		bp->hw_rx_port_stats_ext = | 		bp->hw_rx_port_stats_ext = | ||||||
| 			dma_zalloc_coherent(&pdev->dev, | 			dma_alloc_coherent(&pdev->dev, | ||||||
| 					    sizeof(struct rx_port_stats_ext), | 					   sizeof(struct rx_port_stats_ext), | ||||||
| 					    &bp->hw_rx_port_stats_ext_map, | 					   &bp->hw_rx_port_stats_ext_map, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 		if (!bp->hw_rx_port_stats_ext) | 		if (!bp->hw_rx_port_stats_ext) | ||||||
| 			return 0; | 			return 0; | ||||||
| 
 | 
 | ||||||
|  | @ -3462,10 +3462,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) | ||||||
| 
 | 
 | ||||||
| 		if (bp->hwrm_spec_code >= 0x10902) { | 		if (bp->hwrm_spec_code >= 0x10902) { | ||||||
| 			bp->hw_tx_port_stats_ext = | 			bp->hw_tx_port_stats_ext = | ||||||
| 				dma_zalloc_coherent(&pdev->dev, | 				dma_alloc_coherent(&pdev->dev, | ||||||
| 					    sizeof(struct tx_port_stats_ext), | 						   sizeof(struct tx_port_stats_ext), | ||||||
| 					    &bp->hw_tx_port_stats_ext_map, | 						   &bp->hw_tx_port_stats_ext_map, | ||||||
| 					    GFP_KERNEL); | 						   GFP_KERNEL); | ||||||
| 		} | 		} | ||||||
| 		bp->flags |= BNXT_FLAG_PORT_STATS_EXT; | 		bp->flags |= BNXT_FLAG_PORT_STATS_EXT; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, | ||||||
| 
 | 
 | ||||||
| 	n = IEEE_8021QAZ_MAX_TCS; | 	n = IEEE_8021QAZ_MAX_TCS; | ||||||
| 	data_len = sizeof(*data) + sizeof(*fw_app) * n; | 	data_len = sizeof(*data) + sizeof(*fw_app) * n; | ||||||
| 	data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, | 	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, | ||||||
| 				   GFP_KERNEL); | 				  GFP_KERNEL); | ||||||
| 	if (!data) | 	if (!data) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, | ||||||
| 		return -EFAULT; | 		return -EFAULT; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, | 	data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize, | ||||||
| 					&data_dma_addr, GFP_KERNEL); | 				       &data_dma_addr, GFP_KERNEL); | ||||||
| 	if (!data_addr) | 	if (!data_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp) | ||||||
| 		if (!i && tg3_flag(tp, ENABLE_RSS)) | 		if (!i && tg3_flag(tp, ENABLE_RSS)) | ||||||
| 			continue; | 			continue; | ||||||
| 
 | 
 | ||||||
| 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, | 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, | ||||||
| 						    TG3_RX_RCB_RING_BYTES(tp), | 						   TG3_RX_RCB_RING_BYTES(tp), | ||||||
| 						    &tnapi->rx_rcb_mapping, | 						   &tnapi->rx_rcb_mapping, | ||||||
| 						    GFP_KERNEL); | 						   GFP_KERNEL); | ||||||
| 		if (!tnapi->rx_rcb) | 		if (!tnapi->rx_rcb) | ||||||
| 			goto err_out; | 			goto err_out; | ||||||
| 	} | 	} | ||||||
|  | @ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp) | ||||||
| { | { | ||||||
| 	int i; | 	int i; | ||||||
| 
 | 
 | ||||||
| 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, | 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, | ||||||
| 					   sizeof(struct tg3_hw_stats), | 					  sizeof(struct tg3_hw_stats), | ||||||
| 					   &tp->stats_mapping, GFP_KERNEL); | 					  &tp->stats_mapping, GFP_KERNEL); | ||||||
| 	if (!tp->hw_stats) | 	if (!tp->hw_stats) | ||||||
| 		goto err_out; | 		goto err_out; | ||||||
| 
 | 
 | ||||||
|  | @ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) | ||||||
| 		struct tg3_napi *tnapi = &tp->napi[i]; | 		struct tg3_napi *tnapi = &tp->napi[i]; | ||||||
| 		struct tg3_hw_status *sblk; | 		struct tg3_hw_status *sblk; | ||||||
| 
 | 
 | ||||||
| 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, | 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, | ||||||
| 						       TG3_HW_STATUS_SIZE, | 						      TG3_HW_STATUS_SIZE, | ||||||
| 						       &tnapi->status_mapping, | 						      &tnapi->status_mapping, | ||||||
| 						       GFP_KERNEL); | 						      GFP_KERNEL); | ||||||
| 		if (!tnapi->hw_status) | 		if (!tnapi->hw_status) | ||||||
| 			goto err_out; | 			goto err_out; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, | ||||||
| 	dmem->q_len = q_len; | 	dmem->q_len = q_len; | ||||||
| 	dmem->size = (desc_size * q_len) + align_bytes; | 	dmem->size = (desc_size * q_len) + align_bytes; | ||||||
| 	/* Save address, need it while freeing */ | 	/* Save address, need it while freeing */ | ||||||
| 	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, | 	dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size, | ||||||
| 						&dmem->dma, GFP_KERNEL); | 						&dmem->dma, GFP_KERNEL); | ||||||
| 	if (!dmem->unalign_base) | 	if (!dmem->unalign_base) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, | ||||||
| { | { | ||||||
| 	size_t len = nelem * elem_size; | 	size_t len = nelem * elem_size; | ||||||
| 	void *s = NULL; | 	void *s = NULL; | ||||||
| 	void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); | 	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!p) | 	if (!p) | ||||||
| 		return NULL; | 		return NULL; | ||||||
|  |  | ||||||
|  | @ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, | ||||||
| { | { | ||||||
| 	size_t len = nelem * elem_size + stat_size; | 	size_t len = nelem * elem_size + stat_size; | ||||||
| 	void *s = NULL; | 	void *s = NULL; | ||||||
| 	void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); | 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!p) | 	if (!p) | ||||||
| 		return NULL; | 		return NULL; | ||||||
|  |  | ||||||
|  | @ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, | ||||||
| 	 * Allocate the hardware ring and PCI DMA bus address space for said. | 	 * Allocate the hardware ring and PCI DMA bus address space for said. | ||||||
| 	 */ | 	 */ | ||||||
| 	size_t hwlen = nelem * hwsize + stat_size; | 	size_t hwlen = nelem * hwsize + stat_size; | ||||||
| 	void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); | 	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!hwring) | 	if (!hwring) | ||||||
| 		return NULL; | 		return NULL; | ||||||
|  |  | ||||||
|  | @ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) | ||||||
| 	total_size = buf_len; | 	total_size = buf_len; | ||||||
| 
 | 
 | ||||||
| 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; | 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; | ||||||
| 	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 					     get_fat_cmd.size, | 					    get_fat_cmd.size, | ||||||
| 					     &get_fat_cmd.dma, GFP_ATOMIC); | 					    &get_fat_cmd.dma, GFP_ATOMIC); | ||||||
| 	if (!get_fat_cmd.va) | 	if (!get_fat_cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, | ||||||
| 		return -EINVAL; | 		return -EINVAL; | ||||||
| 
 | 
 | ||||||
| 	cmd.size = sizeof(struct be_cmd_resp_port_type); | 	cmd.size = sizeof(struct be_cmd_resp_port_type); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_ATOMIC); | 				    GFP_ATOMIC); | ||||||
| 	if (!cmd.va) { | 	if (!cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); | 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter, | ||||||
| 
 | 
 | ||||||
| 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) | 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) | ||||||
| 				+ LANCER_FW_DOWNLOAD_CHUNK; | 				+ LANCER_FW_DOWNLOAD_CHUNK; | ||||||
| 	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, | 	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, | ||||||
| 					   &flash_cmd.dma, GFP_KERNEL); | 					  GFP_KERNEL); | ||||||
| 	if (!flash_cmd.va) | 	if (!flash_cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom); | 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom); | ||||||
| 	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, | 	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, | ||||||
| 					   GFP_KERNEL); | 					  GFP_KERNEL); | ||||||
| 	if (!flash_cmd.va) | 	if (!flash_cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) | ||||||
| 		goto err; | 		goto err; | ||||||
| 	} | 	} | ||||||
| 	cmd.size = sizeof(struct be_cmd_req_get_phy_info); | 	cmd.size = sizeof(struct be_cmd_req_get_phy_info); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_ATOMIC); | 				    GFP_ATOMIC); | ||||||
| 	if (!cmd.va) { | 	if (!cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | ||||||
| 		status = -ENOMEM; | 		status = -ENOMEM; | ||||||
|  | @ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | ||||||
| 
 | 
 | ||||||
| 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); | 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); | ||||||
| 	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 					     attribs_cmd.size, | 					    attribs_cmd.size, | ||||||
| 					     &attribs_cmd.dma, GFP_ATOMIC); | 					    &attribs_cmd.dma, GFP_ATOMIC); | ||||||
| 	if (!attribs_cmd.va) { | 	if (!attribs_cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | ||||||
| 		status = -ENOMEM; | 		status = -ENOMEM; | ||||||
|  | @ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, | ||||||
| 
 | 
 | ||||||
| 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); | 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); | ||||||
| 	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 						  get_mac_list_cmd.size, | 						 get_mac_list_cmd.size, | ||||||
| 						  &get_mac_list_cmd.dma, | 						 &get_mac_list_cmd.dma, | ||||||
| 						  GFP_ATOMIC); | 						 GFP_ATOMIC); | ||||||
| 
 | 
 | ||||||
| 	if (!get_mac_list_cmd.va) { | 	if (!get_mac_list_cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, | 		dev_err(&adapter->pdev->dev, | ||||||
|  | @ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, | ||||||
| 
 | 
 | ||||||
| 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	cmd.size = sizeof(struct be_cmd_req_set_mac_list); | 	cmd.size = sizeof(struct be_cmd_req_set_mac_list); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_KERNEL); | 				    GFP_KERNEL); | ||||||
| 	if (!cmd.va) | 	if (!cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) | ||||||
| 
 | 
 | ||||||
| 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); | 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_ATOMIC); | 				    GFP_ATOMIC); | ||||||
| 	if (!cmd.va) { | 	if (!cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | ||||||
| 		status = -ENOMEM; | 		status = -ENOMEM; | ||||||
|  | @ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) | ||||||
| 
 | 
 | ||||||
| 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | ||||||
| 	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 					    extfat_cmd.size, &extfat_cmd.dma, | 					   extfat_cmd.size, &extfat_cmd.dma, | ||||||
| 					    GFP_ATOMIC); | 					   GFP_ATOMIC); | ||||||
| 	if (!extfat_cmd.va) | 	if (!extfat_cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) | ||||||
| 
 | 
 | ||||||
| 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | ||||||
| 	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 					    extfat_cmd.size, &extfat_cmd.dma, | 					   extfat_cmd.size, &extfat_cmd.dma, | ||||||
| 					    GFP_ATOMIC); | 					   GFP_ATOMIC); | ||||||
| 
 | 
 | ||||||
| 	if (!extfat_cmd.va) { | 	if (!extfat_cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", | 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", | ||||||
|  | @ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) | ||||||
| 
 | 
 | ||||||
| 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	cmd.size = sizeof(struct be_cmd_resp_get_func_config); | 	cmd.size = sizeof(struct be_cmd_resp_get_func_config); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_ATOMIC); | 				    GFP_ATOMIC); | ||||||
| 	if (!cmd.va) { | 	if (!cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | ||||||
| 		status = -ENOMEM; | 		status = -ENOMEM; | ||||||
|  | @ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | ||||||
| 
 | 
 | ||||||
| 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config); | 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_ATOMIC); | 				    GFP_ATOMIC); | ||||||
| 	if (!cmd.va) | 	if (!cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, | ||||||
| 
 | 
 | ||||||
| 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	cmd.size = sizeof(struct be_cmd_req_set_profile_config); | 	cmd.size = sizeof(struct be_cmd_req_set_profile_config); | ||||||
| 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | ||||||
| 				     GFP_ATOMIC); | 				    GFP_ATOMIC); | ||||||
| 	if (!cmd.va) | 	if (!cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, | ||||||
| 	int status = 0; | 	int status = 0; | ||||||
| 
 | 
 | ||||||
| 	read_cmd.size = LANCER_READ_FILE_CHUNK; | 	read_cmd.size = LANCER_READ_FILE_CHUNK; | ||||||
| 	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, | 	read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size, | ||||||
| 					  &read_cmd.dma, GFP_ATOMIC); | 					 &read_cmd.dma, GFP_ATOMIC); | ||||||
| 
 | 
 | ||||||
| 	if (!read_cmd.va) { | 	if (!read_cmd.va) { | ||||||
| 		dev_err(&adapter->pdev->dev, | 		dev_err(&adapter->pdev->dev, | ||||||
|  | @ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); | 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); | ||||||
| 	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); | 	cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); | ||||||
| 	if (!cmd.va) | 	if (!cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) | ||||||
| 	}; | 	}; | ||||||
| 
 | 
 | ||||||
| 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); | 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); | ||||||
| 	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 					    ddrdma_cmd.size, &ddrdma_cmd.dma, | 					   ddrdma_cmd.size, &ddrdma_cmd.dma, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 	if (!ddrdma_cmd.va) | 	if (!ddrdma_cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev, | ||||||
| 
 | 
 | ||||||
| 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); | 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); | ||||||
| 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); | 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); | ||||||
| 	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, | 	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, | ||||||
| 					    eeprom_cmd.size, &eeprom_cmd.dma, | 					   eeprom_cmd.size, &eeprom_cmd.dma, | ||||||
| 					    GFP_KERNEL); | 					   GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!eeprom_cmd.va) | 	if (!eeprom_cmd.va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, | ||||||
| 	q->len = len; | 	q->len = len; | ||||||
| 	q->entry_size = entry_size; | 	q->entry_size = entry_size; | ||||||
| 	mem->size = len * entry_size; | 	mem->size = len * entry_size; | ||||||
| 	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, | 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, | ||||||
| 				      GFP_KERNEL); | 				     &mem->dma, GFP_KERNEL); | ||||||
| 	if (!mem->va) | 	if (!mem->va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 	return 0; | 	return 0; | ||||||
|  | @ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter) | ||||||
| 	int status = 0; | 	int status = 0; | ||||||
| 
 | 
 | ||||||
| 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | ||||||
| 	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, | 	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, | ||||||
| 						 &mbox_mem_alloc->dma, | 						&mbox_mem_alloc->dma, | ||||||
| 						 GFP_KERNEL); | 						GFP_KERNEL); | ||||||
| 	if (!mbox_mem_alloc->va) | 	if (!mbox_mem_alloc->va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter) | ||||||
| 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | ||||||
| 
 | 
 | ||||||
| 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter); | 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter); | ||||||
| 	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, | 	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size, | ||||||
| 					    &rx_filter->dma, GFP_KERNEL); | 					   &rx_filter->dma, GFP_KERNEL); | ||||||
| 	if (!rx_filter->va) { | 	if (!rx_filter->va) { | ||||||
| 		status = -ENOMEM; | 		status = -ENOMEM; | ||||||
| 		goto free_mbox; | 		goto free_mbox; | ||||||
|  | @ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter) | ||||||
| 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); | 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); | ||||||
| 	else | 	else | ||||||
| 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); | 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); | ||||||
| 	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, | 	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size, | ||||||
| 					    &stats_cmd->dma, GFP_KERNEL); | 					   &stats_cmd->dma, GFP_KERNEL); | ||||||
| 	if (!stats_cmd->va) { | 	if (!stats_cmd->va) { | ||||||
| 		status = -ENOMEM; | 		status = -ENOMEM; | ||||||
| 		goto free_rx_filter; | 		goto free_rx_filter; | ||||||
|  |  | ||||||
|  | @ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
| 	/* Allocate descriptors */ | 	/* Allocate descriptors */ | ||||||
| 	priv->rxdes = dma_zalloc_coherent(priv->dev, | 	priv->rxdes = dma_alloc_coherent(priv->dev, | ||||||
| 					  MAX_RX_QUEUE_ENTRIES * | 					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes), | ||||||
| 					  sizeof(struct ftgmac100_rxdes), | 					 &priv->rxdes_dma, GFP_KERNEL); | ||||||
| 					  &priv->rxdes_dma, GFP_KERNEL); |  | ||||||
| 	if (!priv->rxdes) | 	if (!priv->rxdes) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 	priv->txdes = dma_zalloc_coherent(priv->dev, | 	priv->txdes = dma_alloc_coherent(priv->dev, | ||||||
| 					  MAX_TX_QUEUE_ENTRIES * | 					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes), | ||||||
| 					  sizeof(struct ftgmac100_txdes), | 					 &priv->txdes_dma, GFP_KERNEL); | ||||||
| 					  &priv->txdes_dma, GFP_KERNEL); |  | ||||||
| 	if (!priv->txdes) | 	if (!priv->txdes) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv) | ||||||
| { | { | ||||||
| 	int i; | 	int i; | ||||||
| 
 | 
 | ||||||
| 	priv->descs = dma_zalloc_coherent(priv->dev, | 	priv->descs = dma_alloc_coherent(priv->dev, | ||||||
| 					  sizeof(struct ftmac100_descs), | 					 sizeof(struct ftmac100_descs), | ||||||
| 					  &priv->descs_dma_addr, | 					 &priv->descs_dma_addr, GFP_KERNEL); | ||||||
| 					  GFP_KERNEL); |  | ||||||
| 	if (!priv->descs) | 	if (!priv->descs) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < QUEUE_NUMS; i++) { | 	for (i = 0; i < QUEUE_NUMS; i++) { | ||||||
| 		size = priv->pool[i].count * sizeof(struct hix5hd2_desc); | 		size = priv->pool[i].count * sizeof(struct hix5hd2_desc); | ||||||
| 		virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, | 		virt_addr = dma_alloc_coherent(dev, size, &phys_addr, | ||||||
| 						GFP_KERNEL); | 					       GFP_KERNEL); | ||||||
| 		if (virt_addr == NULL) | 		if (virt_addr == NULL) | ||||||
| 			goto error_free_pool; | 			goto error_free_pool; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) | ||||||
| { | { | ||||||
| 	int size = ring->desc_num * sizeof(ring->desc[0]); | 	int size = ring->desc_num * sizeof(ring->desc[0]); | ||||||
| 
 | 
 | ||||||
| 	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, | 	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, | ||||||
| 					 &ring->desc_dma_addr, | 					&ring->desc_dma_addr, GFP_KERNEL); | ||||||
| 					 GFP_KERNEL); |  | ||||||
| 	if (!ring->desc) | 	if (!ring->desc) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) | ||||||
| { | { | ||||||
| 	int size  = ring->desc_num * sizeof(struct hclge_desc); | 	int size  = ring->desc_num * sizeof(struct hclge_desc); | ||||||
| 
 | 
 | ||||||
| 	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), | 	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, | ||||||
| 					 size, &ring->desc_dma_addr, | 					&ring->desc_dma_addr, GFP_KERNEL); | ||||||
| 					 GFP_KERNEL); |  | ||||||
| 	if (!ring->desc) | 	if (!ring->desc) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) | ||||||
| { | { | ||||||
| 	int size = ring->desc_num * sizeof(struct hclgevf_desc); | 	int size = ring->desc_num * sizeof(struct hclgevf_desc); | ||||||
| 
 | 
 | ||||||
| 	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), | 	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, | ||||||
| 					 size, &ring->desc_dma_addr, | 					&ring->desc_dma_addr, GFP_KERNEL); | ||||||
| 					 GFP_KERNEL); |  | ||||||
| 	if (!ring->desc) | 	if (!ring->desc) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, | ||||||
| 	u8 *cmd_vaddr; | 	u8 *cmd_vaddr; | ||||||
| 	int err = 0; | 	int err = 0; | ||||||
| 
 | 
 | ||||||
| 	cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, | 	cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, | ||||||
| 					&cmd_paddr, GFP_KERNEL); | 				       &cmd_paddr, GFP_KERNEL); | ||||||
| 	if (!cmd_vaddr) { | 	if (!cmd_vaddr) { | ||||||
| 		dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); | 		dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, | ||||||
| 	dma_addr_t node_paddr; | 	dma_addr_t node_paddr; | ||||||
| 	int err; | 	int err; | ||||||
| 
 | 
 | ||||||
| 	node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, | 	node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, | ||||||
| 				   &node_paddr, GFP_KERNEL); | 				  GFP_KERNEL); | ||||||
| 	if (!node) { | 	if (!node) { | ||||||
| 		dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); | 		dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, | ||||||
| 	if (!chain->cell_ctxt) | 	if (!chain->cell_ctxt) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
| 	chain->wb_status = dma_zalloc_coherent(&pdev->dev, | 	chain->wb_status = dma_alloc_coherent(&pdev->dev, | ||||||
| 					       sizeof(*chain->wb_status), | 					      sizeof(*chain->wb_status), | ||||||
| 					       &chain->wb_status_paddr, | 					      &chain->wb_status_paddr, | ||||||
| 					       GFP_KERNEL); | 					      GFP_KERNEL); | ||||||
| 	if (!chain->wb_status) { | 	if (!chain->wb_status) { | ||||||
| 		dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); | 		dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for (pg = 0; pg < eq->num_pages; pg++) { | 	for (pg = 0; pg < eq->num_pages; pg++) { | ||||||
| 		eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, | 		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, | ||||||
| 							eq->page_size, | 						       eq->page_size, | ||||||
| 							&eq->dma_addr[pg], | 						       &eq->dma_addr[pg], | ||||||
| 							GFP_KERNEL); | 						       GFP_KERNEL); | ||||||
| 		if (!eq->virt_addr[pg]) { | 		if (!eq->virt_addr[pg]) { | ||||||
| 			err = -ENOMEM; | 			err = -ENOMEM; | ||||||
| 			goto err_dma_alloc; | 			goto err_dma_alloc; | ||||||
|  |  | ||||||
|  | @ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, | ||||||
| 		goto err_sq_db; | 		goto err_sq_db; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), | 	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), | ||||||
| 					   &func_to_io->ci_dma_base, | 					  &func_to_io->ci_dma_base, | ||||||
| 					   GFP_KERNEL); | 					  GFP_KERNEL); | ||||||
| 	if (!ci_addr_base) { | 	if (!ci_addr_base) { | ||||||
| 		dev_err(&pdev->dev, "Failed to allocate CI area\n"); | 		dev_err(&pdev->dev, "Failed to allocate CI area\n"); | ||||||
| 		err = -ENOMEM; | 		err = -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq) | ||||||
| 		goto err_cqe_dma_arr_alloc; | 		goto err_cqe_dma_arr_alloc; | ||||||
| 
 | 
 | ||||||
| 	for (i = 0; i < wq->q_depth; i++) { | 	for (i = 0; i < wq->q_depth; i++) { | ||||||
| 		rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, | 		rq->cqe[i] = dma_alloc_coherent(&pdev->dev, | ||||||
| 						 sizeof(*rq->cqe[i]), | 						sizeof(*rq->cqe[i]), | ||||||
| 						 &rq->cqe_dma[i], GFP_KERNEL); | 						&rq->cqe_dma[i], GFP_KERNEL); | ||||||
| 		if (!rq->cqe[i]) | 		if (!rq->cqe[i]) | ||||||
| 			goto err_cqe_alloc; | 			goto err_cqe_alloc; | ||||||
| 	} | 	} | ||||||
|  | @ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, | ||||||
| 
 | 
 | ||||||
| 	/* HW requirements: Must be at least 32 bit */ | 	/* HW requirements: Must be at least 32 bit */ | ||||||
| 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); | 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); | ||||||
| 	rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, | 	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, | ||||||
| 					       &rq->pi_dma_addr, GFP_KERNEL); | 					      &rq->pi_dma_addr, GFP_KERNEL); | ||||||
| 	if (!rq->pi_virt_addr) { | 	if (!rq->pi_virt_addr) { | ||||||
| 		dev_err(&pdev->dev, "Failed to allocate PI address\n"); | 		dev_err(&pdev->dev, "Failed to allocate PI address\n"); | ||||||
| 		err = -ENOMEM; | 		err = -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, | ||||||
| 	struct pci_dev *pdev = hwif->pdev; | 	struct pci_dev *pdev = hwif->pdev; | ||||||
| 	dma_addr_t dma_addr; | 	dma_addr_t dma_addr; | ||||||
| 
 | 
 | ||||||
| 	*vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, | 	*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, | ||||||
| 				     GFP_KERNEL); | 				    GFP_KERNEL); | ||||||
| 	if (!*vaddr) { | 	if (!*vaddr) { | ||||||
| 		dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); | 		dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, | ||||||
| 		u64 *paddr = &wq->block_vaddr[i]; | 		u64 *paddr = &wq->block_vaddr[i]; | ||||||
| 		dma_addr_t dma_addr; | 		dma_addr_t dma_addr; | ||||||
| 
 | 
 | ||||||
| 		*vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, | 		*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, | ||||||
| 					     &dma_addr, GFP_KERNEL); | 					    &dma_addr, GFP_KERNEL); | ||||||
| 		if (!*vaddr) { | 		if (!*vaddr) { | ||||||
| 			dev_err(&pdev->dev, "Failed to allocate wq page\n"); | 			dev_err(&pdev->dev, "Failed to allocate wq page\n"); | ||||||
| 			goto err_alloc_wq_pages; | 			goto err_alloc_wq_pages; | ||||||
|  |  | ||||||
|  | @ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev) | ||||||
| 	bd_size = sizeof(struct mal_descriptor) * | 	bd_size = sizeof(struct mal_descriptor) * | ||||||
| 		(NUM_TX_BUFF * mal->num_tx_chans + | 		(NUM_TX_BUFF * mal->num_tx_chans + | ||||||
| 		 NUM_RX_BUFF * mal->num_rx_chans); | 		 NUM_RX_BUFF * mal->num_rx_chans); | ||||||
| 	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, | 	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, | ||||||
| 					   GFP_KERNEL); | 					  GFP_KERNEL); | ||||||
| 	if (mal->bd_virt == NULL) { | 	if (mal->bd_virt == NULL) { | ||||||
| 		err = -ENOMEM; | 		err = -ENOMEM; | ||||||
| 		goto fail_unmap; | 		goto fail_unmap; | ||||||
|  |  | ||||||
|  | @ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | ||||||
| 
 | 
 | ||||||
| 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc); | 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc); | ||||||
| 	txdr->size = ALIGN(txdr->size, 4096); | 	txdr->size = ALIGN(txdr->size, 4096); | ||||||
| 	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | ||||||
| 					 GFP_KERNEL); | 					GFP_KERNEL); | ||||||
| 	if (!txdr->desc) { | 	if (!txdr->desc) { | ||||||
| 		ret_val = 2; | 		ret_val = 2; | ||||||
| 		goto err_nomem; | 		goto err_nomem; | ||||||
|  | @ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); | 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); | ||||||
| 	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | ||||||
| 					 GFP_KERNEL); | 					GFP_KERNEL); | ||||||
| 	if (!rxdr->desc) { | 	if (!rxdr->desc) { | ||||||
| 		ret_val = 6; | 		ret_val = 6; | ||||||
| 		goto err_nomem; | 		goto err_nomem; | ||||||
|  |  | ||||||
|  | @ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, | ||||||
| { | { | ||||||
| 	struct pci_dev *pdev = adapter->pdev; | 	struct pci_dev *pdev = adapter->pdev; | ||||||
| 
 | 
 | ||||||
| 	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, | 	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, | ||||||
| 					 GFP_KERNEL); | 					GFP_KERNEL); | ||||||
| 	if (!ring->desc) | 	if (!ring->desc) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, | ||||||
| 	struct i40e_pf *pf = (struct i40e_pf *)hw->back; | 	struct i40e_pf *pf = (struct i40e_pf *)hw->back; | ||||||
| 
 | 
 | ||||||
| 	mem->size = ALIGN(size, alignment); | 	mem->size = ALIGN(size, alignment); | ||||||
| 	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, | 	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, | ||||||
| 				      &mem->pa, GFP_KERNEL); | 				     GFP_KERNEL); | ||||||
| 	if (!mem->va) | 	if (!mem->va) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) | ||||||
| 	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); | 	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); | ||||||
| 	txdr->size = ALIGN(txdr->size, 4096); | 	txdr->size = ALIGN(txdr->size, 4096); | ||||||
| 
 | 
 | ||||||
| 	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, | ||||||
| 					 GFP_KERNEL); | 					GFP_KERNEL); | ||||||
| 	if (!txdr->desc) { | 	if (!txdr->desc) { | ||||||
| 		vfree(txdr->buffer_info); | 		vfree(txdr->buffer_info); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | ||||||
| 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | ||||||
| 	rxdr->size = ALIGN(rxdr->size, 4096); | 	rxdr->size = ALIGN(rxdr->size, 4096); | ||||||
| 
 | 
 | ||||||
| 	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | ||||||
| 					 GFP_KERNEL); | 					GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (!rxdr->desc) { | 	if (!rxdr->desc) { | ||||||
| 		vfree(rxdr->buffer_info); | 		vfree(rxdr->buffer_info); | ||||||
|  |  | ||||||
|  | @ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, | ||||||
| 	u32 txq_dma; | 	u32 txq_dma; | ||||||
| 
 | 
 | ||||||
| 	/* Allocate memory for TX descriptors */ | 	/* Allocate memory for TX descriptors */ | ||||||
| 	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, | 	aggr_txq->descs = dma_alloc_coherent(&pdev->dev, | ||||||
| 				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | 					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | ||||||
| 				&aggr_txq->descs_dma, GFP_KERNEL); | 					     &aggr_txq->descs_dma, GFP_KERNEL); | ||||||
| 	if (!aggr_txq->descs) | 	if (!aggr_txq->descs) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep) | ||||||
| 	 * table is full. | 	 * table is full. | ||||||
| 	 */ | 	 */ | ||||||
| 	if (!pep->htpr) { | 	if (!pep->htpr) { | ||||||
| 		pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, | 		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, | ||||||
| 						HASH_ADDR_TABLE_SIZE, | 					       HASH_ADDR_TABLE_SIZE, | ||||||
| 						&pep->htpr_dma, GFP_KERNEL); | 					       &pep->htpr_dma, GFP_KERNEL); | ||||||
| 		if (!pep->htpr) | 		if (!pep->htpr) | ||||||
| 			return -ENOMEM; | 			return -ENOMEM; | ||||||
| 	} else { | 	} else { | ||||||
|  | @ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev) | ||||||
| 	pep->rx_desc_count = 0; | 	pep->rx_desc_count = 0; | ||||||
| 	size = pep->rx_ring_size * sizeof(struct rx_desc); | 	size = pep->rx_ring_size * sizeof(struct rx_desc); | ||||||
| 	pep->rx_desc_area_size = size; | 	pep->rx_desc_area_size = size; | ||||||
| 	pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, | 	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, | ||||||
| 						  &pep->rx_desc_dma, | 						 &pep->rx_desc_dma, | ||||||
| 						  GFP_KERNEL); | 						 GFP_KERNEL); | ||||||
| 	if (!pep->p_rx_desc_area) | 	if (!pep->p_rx_desc_area) | ||||||
| 		goto out; | 		goto out; | ||||||
| 
 | 
 | ||||||
|  | @ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev) | ||||||
| 	pep->tx_desc_count = 0; | 	pep->tx_desc_count = 0; | ||||||
| 	size = pep->tx_ring_size * sizeof(struct tx_desc); | 	size = pep->tx_ring_size * sizeof(struct tx_desc); | ||||||
| 	pep->tx_desc_area_size = size; | 	pep->tx_desc_area_size = size; | ||||||
| 	pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, | 	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, | ||||||
| 						  &pep->tx_desc_dma, | 						 &pep->tx_desc_dma, | ||||||
| 						  GFP_KERNEL); | 						 GFP_KERNEL); | ||||||
| 	if (!pep->p_tx_desc_area) | 	if (!pep->p_tx_desc_area) | ||||||
| 		goto out; | 		goto out; | ||||||
| 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */ | 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */ | ||||||
|  |  | ||||||
|  | @ -598,10 +598,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) | ||||||
| 	dma_addr_t dma_addr; | 	dma_addr_t dma_addr; | ||||||
| 	int i; | 	int i; | ||||||
| 
 | 
 | ||||||
| 	eth->scratch_ring = dma_zalloc_coherent(eth->dev, | 	eth->scratch_ring = dma_alloc_coherent(eth->dev, | ||||||
| 						cnt * sizeof(struct mtk_tx_dma), | 					       cnt * sizeof(struct mtk_tx_dma), | ||||||
| 						ð->phy_scratch_ring, | 					       ð->phy_scratch_ring, | ||||||
| 						GFP_ATOMIC); | 					       GFP_ATOMIC); | ||||||
| 	if (unlikely(!eth->scratch_ring)) | 	if (unlikely(!eth->scratch_ring)) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -1213,8 +1213,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth) | ||||||
| 	if (!ring->buf) | 	if (!ring->buf) | ||||||
| 		goto no_tx_mem; | 		goto no_tx_mem; | ||||||
| 
 | 
 | ||||||
| 	ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, | 	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, | ||||||
| 					&ring->phys, GFP_ATOMIC); | 				       &ring->phys, GFP_ATOMIC); | ||||||
| 	if (!ring->dma) | 	if (!ring->dma) | ||||||
| 		goto no_tx_mem; | 		goto no_tx_mem; | ||||||
| 
 | 
 | ||||||
|  | @ -1310,9 +1310,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) | ||||||
| 			return -ENOMEM; | 			return -ENOMEM; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	ring->dma = dma_zalloc_coherent(eth->dev, | 	ring->dma = dma_alloc_coherent(eth->dev, | ||||||
| 					rx_dma_size * sizeof(*ring->dma), | 				       rx_dma_size * sizeof(*ring->dma), | ||||||
| 					&ring->phys, GFP_ATOMIC); | 				       &ring->phys, GFP_ATOMIC); | ||||||
| 	if (!ring->dma) | 	if (!ring->dma) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, | ||||||
| 	buf->npages       = 1; | 	buf->npages       = 1; | ||||||
| 	buf->page_shift   = get_order(size) + PAGE_SHIFT; | 	buf->page_shift   = get_order(size) + PAGE_SHIFT; | ||||||
| 	buf->direct.buf   = | 	buf->direct.buf   = | ||||||
| 		dma_zalloc_coherent(&dev->persist->pdev->dev, | 		dma_alloc_coherent(&dev->persist->pdev->dev, size, &t, | ||||||
| 				    size, &t, GFP_KERNEL); | 				   GFP_KERNEL); | ||||||
| 	if (!buf->direct.buf) | 	if (!buf->direct.buf) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | ||||||
| 
 | 
 | ||||||
| 		for (i = 0; i < buf->nbufs; ++i) { | 		for (i = 0; i < buf->nbufs; ++i) { | ||||||
| 			buf->page_list[i].buf = | 			buf->page_list[i].buf = | ||||||
| 				dma_zalloc_coherent(&dev->persist->pdev->dev, | 				dma_alloc_coherent(&dev->persist->pdev->dev, | ||||||
| 						    PAGE_SIZE, &t, GFP_KERNEL); | 						   PAGE_SIZE, &t, GFP_KERNEL); | ||||||
| 			if (!buf->page_list[i].buf) | 			if (!buf->page_list[i].buf) | ||||||
| 				goto err_free; | 				goto err_free; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, | ||||||
| 	mutex_lock(&priv->alloc_mutex); | 	mutex_lock(&priv->alloc_mutex); | ||||||
| 	original_node = dev_to_node(&dev->pdev->dev); | 	original_node = dev_to_node(&dev->pdev->dev); | ||||||
| 	set_dev_node(&dev->pdev->dev, node); | 	set_dev_node(&dev->pdev->dev, node); | ||||||
| 	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, | 	cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle, | ||||||
| 					 dma_handle, GFP_KERNEL); | 					GFP_KERNEL); | ||||||
| 	set_dev_node(&dev->pdev->dev, original_node); | 	set_dev_node(&dev->pdev->dev, original_node); | ||||||
| 	mutex_unlock(&priv->alloc_mutex); | 	mutex_unlock(&priv->alloc_mutex); | ||||||
| 	return cpu_handle; | 	return cpu_handle; | ||||||
|  |  | ||||||
|  | @ -1789,8 +1789,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) | ||||||
| { | { | ||||||
| 	struct device *ddev = &dev->pdev->dev; | 	struct device *ddev = &dev->pdev->dev; | ||||||
| 
 | 
 | ||||||
| 	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, | 	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, | ||||||
| 						 &cmd->alloc_dma, GFP_KERNEL); | 						&cmd->alloc_dma, GFP_KERNEL); | ||||||
| 	if (!cmd->cmd_alloc_buf) | 	if (!cmd->cmd_alloc_buf) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -1804,9 +1804,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) | ||||||
| 
 | 
 | ||||||
| 	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, | 	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, | ||||||
| 			  cmd->alloc_dma); | 			  cmd->alloc_dma); | ||||||
| 	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, | 	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, | ||||||
| 						 2 * MLX5_ADAPTER_PAGE_SIZE - 1, | 						2 * MLX5_ADAPTER_PAGE_SIZE - 1, | ||||||
| 						 &cmd->alloc_dma, GFP_KERNEL); | 						&cmd->alloc_dma, GFP_KERNEL); | ||||||
| 	if (!cmd->cmd_alloc_buf) | 	if (!cmd->cmd_alloc_buf) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) | ||||||
| 	for (i = 0; i < mgp->num_slices; i++) { | 	for (i = 0; i < mgp->num_slices; i++) { | ||||||
| 		ss = &mgp->ss[i]; | 		ss = &mgp->ss[i]; | ||||||
| 		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); | 		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); | ||||||
| 		ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, | 		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, | ||||||
| 							&ss->rx_done.bus, | 						       &ss->rx_done.bus, | ||||||
| 							GFP_KERNEL); | 						       GFP_KERNEL); | ||||||
| 		if (ss->rx_done.entry == NULL) | 		if (ss->rx_done.entry == NULL) | ||||||
| 			goto abort; | 			goto abort; | ||||||
| 		bytes = sizeof(*ss->fw_stats); | 		bytes = sizeof(*ss->fw_stats); | ||||||
|  |  | ||||||
|  | @ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) | ||||||
| 	tx_ring->cnt = dp->txd_cnt; | 	tx_ring->cnt = dp->txd_cnt; | ||||||
| 
 | 
 | ||||||
| 	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); | 	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); | ||||||
| 	tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, | 	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size, | ||||||
| 					    &tx_ring->dma, | 					   &tx_ring->dma, | ||||||
| 					    GFP_KERNEL | __GFP_NOWARN); | 					   GFP_KERNEL | __GFP_NOWARN); | ||||||
| 	if (!tx_ring->txds) { | 	if (!tx_ring->txds) { | ||||||
| 		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", | 		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", | ||||||
| 			    tx_ring->cnt); | 			    tx_ring->cnt); | ||||||
|  | @ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) | ||||||
| 
 | 
 | ||||||
| 	rx_ring->cnt = dp->rxd_cnt; | 	rx_ring->cnt = dp->rxd_cnt; | ||||||
| 	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); | 	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); | ||||||
| 	rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, | 	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, | ||||||
| 					    &rx_ring->dma, | 					   &rx_ring->dma, | ||||||
| 					    GFP_KERNEL | __GFP_NOWARN); | 					   GFP_KERNEL | __GFP_NOWARN); | ||||||
| 	if (!rx_ring->rxds) { | 	if (!rx_ring->rxds) { | ||||||
| 		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", | 		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", | ||||||
| 			    rx_ring->cnt); | 			    rx_ring->cnt); | ||||||
|  |  | ||||||
|  | @ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) | ||||||
| 	priv->rx_bd_ci = 0; | 	priv->rx_bd_ci = 0; | ||||||
| 
 | 
 | ||||||
| 	/* Allocate the Tx and Rx buffer descriptors. */ | 	/* Allocate the Tx and Rx buffer descriptors. */ | ||||||
| 	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, | ||||||
| 					    sizeof(*priv->tx_bd_v) * TX_BD_NUM, | 					   sizeof(*priv->tx_bd_v) * TX_BD_NUM, | ||||||
| 					    &priv->tx_bd_p, GFP_KERNEL); | 					   &priv->tx_bd_p, GFP_KERNEL); | ||||||
| 	if (!priv->tx_bd_v) | 	if (!priv->tx_bd_v) | ||||||
| 		goto out; | 		goto out; | ||||||
| 
 | 
 | ||||||
|  | @ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) | ||||||
| 	if (!priv->tx_skb) | 	if (!priv->tx_skb) | ||||||
| 		goto out; | 		goto out; | ||||||
| 
 | 
 | ||||||
| 	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, | 	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, | ||||||
| 					    sizeof(*priv->rx_bd_v) * RX_BD_NUM, | 					   sizeof(*priv->rx_bd_v) * RX_BD_NUM, | ||||||
| 					    &priv->rx_bd_p, GFP_KERNEL); | 					   &priv->rx_bd_p, GFP_KERNEL); | ||||||
| 	if (!priv->rx_bd_v) | 	if (!priv->rx_bd_v) | ||||||
| 		goto out; | 		goto out; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, | ||||||
| 
 | 
 | ||||||
| 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; | 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; | ||||||
| 	rx_ring->rx_buff_pool = | 	rx_ring->rx_buff_pool = | ||||||
| 		dma_zalloc_coherent(&pdev->dev, size, | 		dma_alloc_coherent(&pdev->dev, size, | ||||||
| 				    &rx_ring->rx_buff_pool_logic, GFP_KERNEL); | 				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL); | ||||||
| 	if (!rx_ring->rx_buff_pool) | 	if (!rx_ring->rx_buff_pool) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, | ||||||
| 
 | 
 | ||||||
| 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); | 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); | ||||||
| 
 | 
 | ||||||
| 	tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, | 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, | ||||||
| 					    &tx_ring->dma, GFP_KERNEL); | 					   &tx_ring->dma, GFP_KERNEL); | ||||||
| 	if (!tx_ring->desc) { | 	if (!tx_ring->desc) { | ||||||
| 		vfree(tx_ring->buffer_info); | 		vfree(tx_ring->buffer_info); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  | @ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
| 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); | 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); | ||||||
| 	rx_ring->desc =	dma_zalloc_coherent(&pdev->dev, rx_ring->size, | 	rx_ring->desc =	dma_alloc_coherent(&pdev->dev, rx_ring->size, | ||||||
| 					    &rx_ring->dma, GFP_KERNEL); | 						  &rx_ring->dma, GFP_KERNEL); | ||||||
| 	if (!rx_ring->desc) { | 	if (!rx_ring->desc) { | ||||||
| 		vfree(rx_ring->buffer_info); | 		vfree(rx_ring->buffer_info); | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
|  | @ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev) | ||||||
| 	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) | 	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) | ||||||
| 		goto out_ring_desc; | 		goto out_ring_desc; | ||||||
| 
 | 
 | ||||||
| 	ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, | 	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, | ||||||
| 					    RX_RING_SIZE * sizeof(u64), | 					   RX_RING_SIZE * sizeof(u64), | ||||||
| 					    &ring->buf_dma, GFP_KERNEL); | 					   &ring->buf_dma, GFP_KERNEL); | ||||||
| 	if (!ring->buffers) | 	if (!ring->buffers) | ||||||
| 		goto out_ring_desc; | 		goto out_ring_desc; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) | ||||||
| 		u32 size = min_t(u32, total_size, psz); | 		u32 size = min_t(u32, total_size, psz); | ||||||
| 		void **p_virt = &p_mngr->t2[i].p_virt; | 		void **p_virt = &p_mngr->t2[i].p_virt; | ||||||
| 
 | 
 | ||||||
| 		*p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, | 		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, | ||||||
| 					      size, &p_mngr->t2[i].p_phys, | 					     &p_mngr->t2[i].p_phys, | ||||||
| 					      GFP_KERNEL); | 					     GFP_KERNEL); | ||||||
| 		if (!p_mngr->t2[i].p_virt) { | 		if (!p_mngr->t2[i].p_virt) { | ||||||
| 			rc = -ENOMEM; | 			rc = -ENOMEM; | ||||||
| 			goto t2_fail; | 			goto t2_fail; | ||||||
|  | @ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, | ||||||
| 		u32 size; | 		u32 size; | ||||||
| 
 | 
 | ||||||
| 		size = min_t(u32, sz_left, p_blk->real_size_in_page); | 		size = min_t(u32, sz_left, p_blk->real_size_in_page); | ||||||
| 		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, | 		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, | ||||||
| 					     &p_phys, GFP_KERNEL); | 					    &p_phys, GFP_KERNEL); | ||||||
| 		if (!p_virt) | 		if (!p_virt) | ||||||
| 			return -ENOMEM; | 			return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, | ||||||
| 		goto out0; | 		goto out0; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, | 	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | ||||||
| 				     p_blk->real_size_in_page, &p_phys, | 				    p_blk->real_size_in_page, &p_phys, | ||||||
| 				     GFP_KERNEL); | 				    GFP_KERNEL); | ||||||
| 	if (!p_virt) { | 	if (!p_virt) { | ||||||
| 		rc = -ENOMEM; | 		rc = -ENOMEM; | ||||||
| 		goto out1; | 		goto out1; | ||||||
|  |  | ||||||
|  | @ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, | ||||||
| 	*(tx_ring->hw_consumer) = 0; | 	*(tx_ring->hw_consumer) = 0; | ||||||
| 
 | 
 | ||||||
| 	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); | 	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); | ||||||
| 	rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, | 	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, | ||||||
| 				      &rq_phys_addr, GFP_KERNEL); | 				     &rq_phys_addr, GFP_KERNEL); | ||||||
| 	if (!rq_addr) | 	if (!rq_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
| 	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); | 	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); | ||||||
| 	rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, | 	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, | ||||||
| 				       &rsp_phys_addr, GFP_KERNEL); | 				      &rsp_phys_addr, GFP_KERNEL); | ||||||
| 	if (!rsp_addr) { | 	if (!rsp_addr) { | ||||||
| 		err = -ENOMEM; | 		err = -ENOMEM; | ||||||
| 		goto out_free_rq; | 		goto out_free_rq; | ||||||
|  | @ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, | ||||||
| 	struct qlcnic_cmd_args cmd; | 	struct qlcnic_cmd_args cmd; | ||||||
| 	size_t  nic_size = sizeof(struct qlcnic_info_le); | 	size_t  nic_size = sizeof(struct qlcnic_info_le); | ||||||
| 
 | 
 | ||||||
| 	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, | 	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, | ||||||
| 					    &nic_dma_t, GFP_KERNEL); | 					   &nic_dma_t, GFP_KERNEL); | ||||||
| 	if (!nic_info_addr) | 	if (!nic_info_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, | ||||||
| 	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) | 	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) | ||||||
| 		return err; | 		return err; | ||||||
| 
 | 
 | ||||||
| 	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, | 	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, | ||||||
| 					    &nic_dma_t, GFP_KERNEL); | 					   &nic_dma_t, GFP_KERNEL); | ||||||
| 	if (!nic_info_addr) | 	if (!nic_info_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, | ||||||
| 	void *pci_info_addr; | 	void *pci_info_addr; | ||||||
| 	int err = 0, i; | 	int err = 0, i; | ||||||
| 
 | 
 | ||||||
| 	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, | 	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, | ||||||
| 					    &pci_info_dma_t, GFP_KERNEL); | 					   &pci_info_dma_t, GFP_KERNEL); | ||||||
| 	if (!pci_info_addr) | 	if (!pci_info_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, | ||||||
| 		return -EIO; | 		return -EIO; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, | 	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, | ||||||
| 					 &stats_dma_t, GFP_KERNEL); | 					&stats_dma_t, GFP_KERNEL); | ||||||
| 	if (!stats_addr) | 	if (!stats_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, | ||||||
| 	if (mac_stats == NULL) | 	if (mac_stats == NULL) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
| 	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, | 	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, | ||||||
| 					 &stats_dma_t, GFP_KERNEL); | 					&stats_dma_t, GFP_KERNEL); | ||||||
| 	if (!stats_addr) | 	if (!stats_addr) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) | ||||||
| 			    8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ | 			    8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ | ||||||
| 
 | 
 | ||||||
| 	ring_header->used = 0; | 	ring_header->used = 0; | ||||||
| 	ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, | 	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size, | ||||||
| 						 &ring_header->dma_addr, | 						 &ring_header->dma_addr, | ||||||
| 						 GFP_KERNEL); | 						 GFP_KERNEL); | ||||||
| 	if (!ring_header->v_addr) | 	if (!ring_header->v_addr) | ||||||
|  |  | ||||||
|  | @ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	/* allocate memory for TX descriptors */ | 	/* allocate memory for TX descriptors */ | ||||||
| 	tx_ring->dma_tx = dma_zalloc_coherent(dev, | 	tx_ring->dma_tx = dma_alloc_coherent(dev, | ||||||
| 					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc), | 					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc), | ||||||
| 					      &tx_ring->dma_tx_phy, GFP_KERNEL); | 					     &tx_ring->dma_tx_phy, GFP_KERNEL); | ||||||
| 	if (!tx_ring->dma_tx) | 	if (!tx_ring->dma_tx) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
| 
 | 
 | ||||||
|  | @ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, | ||||||
| 	rx_ring->queue_no = queue_no; | 	rx_ring->queue_no = queue_no; | ||||||
| 
 | 
 | ||||||
| 	/* allocate memory for RX descriptors */ | 	/* allocate memory for RX descriptors */ | ||||||
| 	rx_ring->dma_rx = dma_zalloc_coherent(priv->device, | 	rx_ring->dma_rx = dma_alloc_coherent(priv->device, | ||||||
| 					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc), | 					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc), | ||||||
| 					      &rx_ring->dma_rx_phy, GFP_KERNEL); | 					     &rx_ring->dma_rx_phy, GFP_KERNEL); | ||||||
| 
 | 
 | ||||||
| 	if (rx_ring->dma_rx == NULL) | 	if (rx_ring->dma_rx == NULL) | ||||||
| 		return -ENOMEM; | 		return -ENOMEM; | ||||||
|  |  | ||||||
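The hunks above are mechanical: every call site keeps its device, size, DMA-handle, and GFP arguments, and only the allocator name plus the continuation-line indentation change. As a reference, here is a minimal sketch of the resulting call pattern in plain C; the helper and parameter names are hypothetical and not taken from any driver touched by this commit.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical descriptor-ring allocation mirroring the converted call
 * sites above; names are illustrative only.
 */
static int example_alloc_ring(struct device *dev, size_t nr_desc,
			      size_t desc_size, void **virt, dma_addr_t *phys)
{
	*virt = dma_alloc_coherent(dev, nr_desc * desc_size, phys, GFP_KERNEL);
	if (!*virt)
		return -ENOMEM;	/* same error path as the hunks above */
	return 0;
}

/* Teardown pairs with the allocation, unchanged by this commit. */
static void example_free_ring(struct device *dev, size_t nr_desc,
			      size_t desc_size, void *virt, dma_addr_t phys)
{
	dma_free_coherent(dev, nr_desc * desc_size, virt, phys);
}

Call sites running in atomic context (as in some of the hunks above) pass GFP_ATOMIC instead of GFP_KERNEL; the shape of the call is otherwise the same.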