Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	cross-tree: phase out dma_zalloc_coherent()
We already need to zero out memory for dma_alloc_coherent(), so using
dma_zalloc_coherent() is superfluous. Phase it out.

This change was generated with the following Coccinelle SmPL patch:

@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@
-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 3bd6e94bec
commit 750afb08ca

173 changed files with 915 additions and 949 deletions
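The conversion is mechanical: every dma_zalloc_coherent() call becomes a dma_alloc_coherent() call with the same arguments, and no memset() needs to be added because dma_alloc_coherent() already returns zeroed memory. Much of the line churn in the hunks below is only re-wrapping arguments to the new, one-character-shorter function name. A minimal sketch of the pattern, using a hypothetical "foo" driver (the struct and field names are illustrative and not taken from any file in this series):

/* Hypothetical example only: struct foo_dev and its fields are made up. */
static int foo_alloc_ring(struct foo_dev *fd, size_t size)
{
	/*
	 * Before this series the driver would have called
	 * dma_zalloc_coherent(fd->dev, size, &fd->ring_dma, GFP_KERNEL).
	 * dma_alloc_coherent() already zeroes the buffer, so the call
	 * converts one-to-one and no extra memset() is required.
	 */
	fd->ring = dma_alloc_coherent(fd->dev, size, &fd->ring_dma, GFP_KERNEL);
	if (!fd->ring)
		return -ENOMEM;

	return 0;
}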
@@ -129,7 +129,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 	unsigned long flags;

 	ch->desc = 0;
-	ch->desc_base = dma_zalloc_coherent(ch->dev,
+	ch->desc_base = dma_alloc_coherent(ch->dev,
 					   LTQ_DESC_NUM * LTQ_DESC_SIZE,
 					   &ch->phys, GFP_ATOMIC);

@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)

 	chan->ring_size = ring_size;

-	chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
+	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
 					     ring_size * sizeof(u64),
 					     &chan->ring_dma, GFP_KERNEL);

@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
 	}

 	/* Initialize outbound message descriptor ring */
-	rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
+	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
 						   rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-				&rmu->msg_tx_ring.phys, GFP_KERNEL);
+						   &rmu->msg_tx_ring.phys,
+						   GFP_KERNEL);
 	if (!rmu->msg_tx_ring.virt) {
 		rc = -ENOMEM;
 		goto out_dma;

@@ -729,7 +729,7 @@ static int sata_fsl_port_start(struct ata_port *ap)
 	if (!pp)
 		return -ENOMEM;

-	mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+	mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
 				 GFP_KERNEL);
 	if (!mem) {
 		kfree(pp);
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)

 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
 						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-						 &he_dev->tpdrq_phys, GFP_KERNEL);
+						&he_dev->tpdrq_phys,
+						GFP_KERNEL);
 	if (he_dev->tpdrq_base == NULL) {
 		hprintk("failed to alloc tpdrq\n");
 		return -ENOMEM;

@@ -805,7 +806,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
 		goto out_free_rbpl_virt;
 	}

-	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
 					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
 					       &he_dev->rbpl_phys, GFP_KERNEL);
 	if (he_dev->rbpl_base == NULL) {

@@ -844,7 +845,7 @@ static int he_init_group(struct he_dev *he_dev, int group)

 	/* rx buffer ready queue */

-	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
 					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
 					       &he_dev->rbrq_phys, GFP_KERNEL);
 	if (he_dev->rbrq_base == NULL) {

@@ -868,7 +869,7 @@ static int he_init_group(struct he_dev *he_dev, int group)

 	/* tx buffer ready queue */

-	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
 					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 					       &he_dev->tbrq_phys, GFP_KERNEL);
 	if (he_dev->tbrq_base == NULL) {

@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
 	/* 2.9.3.5  tail offset for each interrupt queue is located after the
 		    end of the interrupt queue */

-	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					       (CONFIG_IRQ_SIZE + 1)
-					       * sizeof(struct he_irq),
-					       &he_dev->irq_phys,
-					       GFP_KERNEL);
+	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+					      &he_dev->irq_phys, GFP_KERNEL);
 	if (he_dev->irq_base == NULL) {
 		hprintk("failed to allocate irq\n");
 		return -ENOMEM;

@@ -1464,7 +1463,7 @@ static int he_start(struct atm_dev *dev)

 	/* host status page */

-	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
+	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
 					 sizeof(struct he_hsp),
 					 &he_dev->hsp_phys, GFP_KERNEL);
 	if (he_dev->hsp == NULL) {
@@ -641,7 +641,7 @@ alloc_scq(struct idt77252_dev *card, int class)
 	scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
 	if (!scq)
 		return NULL;
-	scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+	scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
 				       &scq->paddr, GFP_KERNEL);
 	if (scq->base == NULL) {
 		kfree(scq);

@@ -971,7 +971,7 @@ init_rsq(struct idt77252_dev *card)
 {
 	struct rsq_entry *rsqe;

-	card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
+	card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
 					    &card->rsq.paddr, GFP_KERNEL);
 	if (card->rsq.base == NULL) {
 		printk("%s: can't allocate RSQ.\n", card->name);

@@ -3390,7 +3390,7 @@ static int init_card(struct atm_dev *dev)
 	writel(0, SAR_REG_GP);

 	/* Initialize RAW Cell Handle Register  */
-	card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
+	card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
 						2 * sizeof(u32),
 						&card->raw_cell_paddr,
 						GFP_KERNEL);

@@ -2641,7 +2641,7 @@ static int skd_cons_skcomp(struct skd_device *skdev)
 		"comp pci_alloc, total bytes %zd entries %d\n",
 		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

-	skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+	skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
 				    &skdev->cq_dma_address, GFP_KERNEL);

 	if (skcomp == NULL) {
@@ -283,7 +283,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
  */
 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
 {
-	dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
 				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
 				      &dev->gdr_pa, GFP_ATOMIC);
 	if (!dev->gdr)

@@ -278,7 +278,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
 	mcode->num_cores = is_ae ? 6 : 10;

 	/*  Allocate DMAable space */
-	mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
+	mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
 					 &mcode->phys_base, GFP_KERNEL);
 	if (!mcode->code) {
 		dev_err(dev, "Unable to allocate space for microcode");

@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,

 			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
 					rem_q_size;
-			curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
+			curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
 							      c_size + CPT_NEXT_CHUNK_PTR_SIZE,
-					  &curr->dma_addr, GFP_KERNEL);
+							      &curr->dma_addr,
+							      GFP_KERNEL);
 			if (!curr->head) {
 				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
 					i, queue->nchunks);

@@ -25,7 +25,7 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
 	struct nitrox_device *ndev = cmdq->ndev;

 	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
-	cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
+	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
 						&cmdq->unalign_dma,
 						GFP_KERNEL);
 	if (!cmdq->unalign_base)
@@ -822,7 +822,7 @@ static int ccp5_init(struct ccp_device *ccp)
 		/* Page alignment satisfies our needs for N <= 128 */
 		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
 		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
-		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
+		cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
 						  &cmd_q->qbase_dma,
 						  GFP_KERNEL);
 		if (!cmd_q->qbase) {

@@ -241,7 +241,7 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
 		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
 	} else {
 		/* new key */
-		ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
 					      &ctx->pkey, GFP_KERNEL);
 		if (!ctx->key) {
 			mutex_unlock(&ctx->lock);

@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
 	struct sec_queue_ring_db *ring_db = &queue->ring_db;
 	int ret;

-	ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
-					      &ring_cmd->paddr,
-					      GFP_KERNEL);
+	ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+					     &ring_cmd->paddr, GFP_KERNEL);
 	if (!ring_cmd->vaddr)
 		return -ENOMEM;

@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
 	mutex_init(&ring_cmd->lock);
 	ring_cmd->callback = sec_alg_callback;

-	ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
-					     &ring_cq->paddr,
-					     GFP_KERNEL);
+	ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+					    &ring_cq->paddr, GFP_KERNEL);
 	if (!ring_cq->vaddr) {
 		ret = -ENOMEM;
 		goto err_free_ring_cmd;
 	}

-	ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
-					     &ring_db->paddr,
-					     GFP_KERNEL);
+	ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+					    &ring_db->paddr, GFP_KERNEL);
 	if (!ring_db->vaddr) {
 		ret = -ENOMEM;
 		goto err_free_ring_cq;
@@ -260,7 +260,7 @@ static int setup_crypt_desc(void)
 {
 	struct device *dev = &pdev->dev;
 	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-	crypt_virt = dma_zalloc_coherent(dev,
+	crypt_virt = dma_alloc_coherent(dev,
 					NPE_QLEN * sizeof(struct crypt_ctl),
 					&crypt_phys, GFP_ATOMIC);
 	if (!crypt_virt)

@@ -453,14 +453,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
 		if (!ring[i])
 			goto err_cleanup;

-		ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
+		ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
 						       MTK_DESC_RING_SZ,
 						       &ring[i]->cmd_dma,
 						       GFP_KERNEL);
 		if (!ring[i]->cmd_base)
 			goto err_cleanup;

-		ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
+		ring[i]->res_base = dma_alloc_coherent(cryp->dev,
 						       MTK_DESC_RING_SZ,
 						       &ring[i]->res_dma,
 						       GFP_KERNEL);

@@ -244,7 +244,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
 			     dev_to_node(&GET_DEV(accel_dev)));
 	if (!admin)
 		return -ENOMEM;
-	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+	admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
 					      &admin->phy_addr, GFP_KERNEL);
 	if (!admin->virt_addr) {
 		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");

@@ -252,7 +252,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
 		return -ENOMEM;
 	}

-	admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
+	admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
 						  PAGE_SIZE,
 						  &admin->const_tbl_addr,
 						  GFP_KERNEL);
@@ -601,13 +601,13 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,

 		dev = &GET_DEV(inst->accel_dev);
 		ctx->inst = inst;
-		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
 						 &ctx->enc_cd_paddr,
 						 GFP_ATOMIC);
 		if (!ctx->enc_cd) {
 			return -ENOMEM;
 		}
-		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
 						 &ctx->dec_cd_paddr,
 						 GFP_ATOMIC);
 		if (!ctx->dec_cd) {

@@ -933,14 +933,14 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,

 		dev = &GET_DEV(inst->accel_dev);
 		ctx->inst = inst;
-		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
 						 &ctx->enc_cd_paddr,
 						 GFP_ATOMIC);
 		if (!ctx->enc_cd) {
 			spin_unlock(&ctx->lock);
 			return -ENOMEM;
 		}
-		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
 						 &ctx->dec_cd_paddr,
 						 GFP_ATOMIC);
 		if (!ctx->dec_cd) {
@@ -332,7 +332,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 		} else {
 			int shift = ctx->p_size - req->src_len;

-			qat_req->src_align = dma_zalloc_coherent(dev,
+			qat_req->src_align = dma_alloc_coherent(dev,
 								ctx->p_size,
 								&qat_req->in.dh.in.b,
 								GFP_KERNEL);

@@ -360,7 +360,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 			goto unmap_src;

 	} else {
-		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
 							&qat_req->out.dh.r,
 							GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))

@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 		return -EINVAL;

 	ctx->p_size = params->p_size;
-	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
 	if (!ctx->p)
 		return -ENOMEM;
 	memcpy(ctx->p, params->p, ctx->p_size);

@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 		return 0;
 	}

-	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
 	if (!ctx->g)
 		return -ENOMEM;
 	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,

@@ -503,7 +503,7 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	if (ret < 0)
 		goto err_clear_ctx;

-	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
 				     GFP_KERNEL);
 	if (!ctx->xa) {
 		ret = -ENOMEM;
@@ -737,7 +737,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;

-		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
 							&qat_req->in.rsa.enc.m,
 							GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))

@@ -756,7 +756,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 			goto unmap_src;

 	} else {
-		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
 							&qat_req->out.rsa.enc.c,
 							GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))

@@ -881,7 +881,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;

-		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
 							&qat_req->in.rsa.dec.c,
 							GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))

@@ -900,7 +900,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 			goto unmap_src;

 	} else {
-		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
 							&qat_req->out.rsa.dec.m,
 							GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))

@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
 		goto err;

 	ret = -ENOMEM;
-	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
 	if (!ctx->n)
 		goto err;

@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
 		return -EINVAL;
 	}

-	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
 	if (!ctx->e)
 		return -ENOMEM;

@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
 		goto err;

 	ret = -ENOMEM;
-	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
 	if (!ctx->d)
 		goto err;

@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto err;
-	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
 	if (!ctx->p)
 		goto err;
 	memcpy(ctx->p + (half_key_sz - len), ptr, len);

@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_p;
-	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
 	if (!ctx->q)
 		goto free_p;
 	memcpy(ctx->q + (half_key_sz - len), ptr, len);

@@ -1099,7 +1099,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_q;
-	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
 				     GFP_KERNEL);
 	if (!ctx->dp)
 		goto free_q;

@@ -1111,7 +1111,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_dp;
-	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
 				     GFP_KERNEL);
 	if (!ctx->dq)
 		goto free_dp;

@@ -1123,7 +1123,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
 	qat_rsa_drop_leading_zeros(&ptr, &len);
 	if (!len)
 		goto free_dq;
-	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
 				       GFP_KERNEL);
 	if (!ctx->qinv)
 		goto free_dq;
@@ -1182,7 +1182,7 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 {
 	int ret = -EBUSY;

-	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+	sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
 				       GFP_NOWAIT);
 	if (!sdma->bd0) {
 		ret = -ENOMEM;

@@ -1205,7 +1205,7 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
 	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	int ret = 0;

-	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+	desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
 				      GFP_NOWAIT);
 	if (!desc->bd) {
 		ret = -ENOMEM;

@@ -325,7 +325,7 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
 	 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
 	 */
 	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
-	ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+	ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
 				       &ring->tphys, GFP_NOWAIT);
 	if (!ring->txd)
 		return -ENOMEM;

@@ -416,7 +416,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int ret;

-	mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
+	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
 					   CCW_BLOCK_SIZE,
 					   &mxs_chan->ccw_phys, GFP_KERNEL);
 	if (!mxs_chan->ccw) {

@@ -1208,7 +1208,7 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
 	ring->size = ret;

 	/* Allocate memory for DMA ring descriptor */
-	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
+	ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
 					      &ring->desc_paddr, GFP_KERNEL);
 	if (!ring->desc_vaddr) {
 		chan_err(chan, "Failed to allocate ring desc\n");
@@ -879,9 +879,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 */
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		/* Allocate the buffer descriptors. */
-		chan->seg_v = dma_zalloc_coherent(chan->dev,
-						  sizeof(*chan->seg_v) *
-						  XILINX_DMA_NUM_DESCS,
+		chan->seg_v = dma_alloc_coherent(chan->dev,
+						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
 						 &chan->seg_p, GFP_KERNEL);
 		if (!chan->seg_v) {
 			dev_err(chan->dev,

@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		 * so allocating a desc segment during channel allocation for
 		 * programming tail descriptor.
 		 */
-		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
 							sizeof(*chan->cyclic_seg_v),
-					&chan->cyclic_seg_p, GFP_KERNEL);
+							&chan->cyclic_seg_p,
+							GFP_KERNEL);
 		if (!chan->cyclic_seg_v) {
 			dev_err(chan->dev,
 				"unable to allocate desc segment for cyclic DMA\n");

@@ -490,7 +490,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 		list_add_tail(&desc->node, &chan->free_list);
 	}

-	chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
+	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
 					       (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
 					       &chan->desc_pool_p, GFP_KERNEL);
 	if (!chan->desc_pool_v)
@@ -61,7 +61,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 		return NULL;

 	dmah->size = size;
-	dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
+	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+					 &dmah->busaddr,
 					 GFP_KERNEL | __GFP_COMP);

 	if (dmah->vaddr == NULL) {

@@ -766,7 +766,7 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
 		return NULL;

 	sbuf->size = size;
-	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
+	sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
 				      &sbuf->dma_addr, GFP_ATOMIC);
 	if (!sbuf->sb)
 		goto bail;

@@ -105,7 +105,7 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,

 	if (!sghead) {
 		for (i = 0; i < pages; i++) {
-			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
 							    pbl->pg_size,
 							    &pbl->pg_map_arr[i],
 							    GFP_KERNEL);

@@ -291,7 +291,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 	if (!wq->sq)
 		goto err3;

-	wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
 				       depth * sizeof(union t3_wr),
 				       &(wq->dma_addr), GFP_KERNEL);
 	if (!wq->queue)
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
 	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
 		T4_RQT_ENTRY_SHIFT;

-	wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev,
-				       wq->memsize, &wq->dma_addr,
-			GFP_KERNEL);
+	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+				       &wq->dma_addr, GFP_KERNEL);
 	if (!wq->queue)
 		goto err_free_rqtpool;

@@ -899,8 +899,8 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
 		goto done;

 	/* allocate dummy tail memory for all receive contexts */
-	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
-		&dd->pcidev->dev, sizeof(u64),
+	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+							 sizeof(u64),
 							 &dd->rcvhdrtail_dummy_dma,
 							 GFP_KERNEL);

@@ -1863,8 +1863,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 			gfp_flags = GFP_KERNEL;
 		else
 			gfp_flags = GFP_USER;
-		rcd->rcvhdrq = dma_zalloc_coherent(
-			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
+		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+						  &rcd->rcvhdrq_dma,
 						  gfp_flags | __GFP_COMP);

 		if (!rcd->rcvhdrq) {

@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)

 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
 		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
-			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
-				&dd->pcidev->dev, PAGE_SIZE,
-				&rcd->rcvhdrqtailaddr_dma, gfp_flags);
+			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+								    PAGE_SIZE,
+								    &rcd->rcvhdrqtailaddr_dma,
+								    gfp_flags);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
 		}

@@ -1974,7 +1975,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 	while (alloced_bytes < rcd->egrbufs.size &&
 	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
 		rcd->egrbufs.buffers[idx].addr =
-			dma_zalloc_coherent(&dd->pcidev->dev,
+			dma_alloc_coherent(&dd->pcidev->dev,
 					   rcd->egrbufs.rcvtid_size,
 					   &rcd->egrbufs.buffers[idx].dma,
 					   gfp_flags);
@@ -2098,8 +2098,7 @@ int init_credit_return(struct hfi1_devdata *dd)
 		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);

 		set_dev_node(&dd->pcidev->dev, i);
-		dd->cr_base[i].va = dma_zalloc_coherent(
-					&dd->pcidev->dev,
+		dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
 						       bytes,
 						       &dd->cr_base[i].dma,
 						       GFP_KERNEL);

@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		timer_setup(&sde->err_progress_check_timer,
 			    sdma_err_progress_check, 0);

-		sde->descq = dma_zalloc_coherent(
-			&dd->pcidev->dev,
+		sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
 						descq_cnt * sizeof(u64[2]),
-			&sde->descq_phys,
-			GFP_KERNEL
-		);
+						&sde->descq_phys, GFP_KERNEL);
 		if (!sde->descq)
 			goto bail;
 		sde->tx_ring =

@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)

 	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
 	/* Allocate memory for DMA of head registers to memory */
-	dd->sdma_heads_dma = dma_zalloc_coherent(
-		&dd->pcidev->dev,
+	dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
 						dd->sdma_heads_size,
 						&dd->sdma_heads_phys,
-		GFP_KERNEL
-	);
+						GFP_KERNEL);
 	if (!dd->sdma_heads_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
 		goto bail;
 	}

 	/* Allocate memory for pad */
-	dd->sdma_pad_dma = dma_zalloc_coherent(
-		&dd->pcidev->dev,
-		sizeof(u32),
-		&dd->sdma_pad_phys,
-		GFP_KERNEL
-	);
+	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+					      &dd->sdma_pad_phys, GFP_KERNEL);
 	if (!dd->sdma_pad_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
 		goto bail;
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 		buf->npages = 1 << order;
 		buf->page_shift = page_shift;
 		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
-		buf->direct.buf = dma_zalloc_coherent(dev,
-						      size, &t, GFP_KERNEL);
+		buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+						     GFP_KERNEL);
 		if (!buf->direct.buf)
 			return -ENOMEM;

@@ -219,8 +219,9 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			return -ENOMEM;

 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->page_list[i].buf = dma_zalloc_coherent(dev,
-								  page_size, &t,
+			buf->page_list[i].buf = dma_alloc_coherent(dev,
+								   page_size,
+								   &t,
 								   GFP_KERNEL);

 			if (!buf->page_list[i].buf)

@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
 			}
-			eq->buf[i] = dma_zalloc_coherent(dev, size,
+			eq->buf[i] = dma_alloc_coherent(dev, size,
 							&(eq->buf_dma[i]),
 							GFP_KERNEL);
 			if (!eq->buf[i])

@@ -5126,7 +5126,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 					size = (eq->entries - eqe_alloc)
 						* eq->eqe_size;
 				}
-				eq->buf[idx] = dma_zalloc_coherent(dev, size,
+				eq->buf[idx] = dma_alloc_coherent(dev, size,
 								  &(eq->buf_dma[idx]),
 								  GFP_KERNEL);
 				if (!eq->buf[idx])

@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
 			goto free_cmd_mbox;
 		}

-		eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
+		eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
 						       &(eq->buf_list->map),
 						       GFP_KERNEL);
 		if (!eq->buf_list->buf) {
@@ -745,7 +745,7 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
 	if (!mem)
 		return I40IW_ERR_PARAM;
 	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
+	mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
 				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
 	if (!mem->va)
 		return I40IW_ERR_NO_MEMORY;

@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	page = dev->db_tab->page + end;

 alloc:
-	page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
-					   &page->mapping, GFP_KERNEL);
+	page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+					  MTHCA_ICM_PAGE_SIZE, &page->mapping,
+					  GFP_KERNEL);
 	if (!page->db_rec) {
 		ret = -ENOMEM;
 		goto out;

@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
 	q->len = len;
 	q->entry_size = entry_size;
 	q->size = len * entry_size;
-	q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
-				    &q->dma, GFP_KERNEL);
+	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+				   GFP_KERNEL);
 	if (!q->va)
 		return -ENOMEM;
 	return 0;

@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 		return -ENOMEM;
 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
-	cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
 	if (!cq->va) {
 		status = -ENOMEM;
 		goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->sq.max_cnt = max_wqe_allocated;
 	len = (hw_pages * hw_page_size);

-	qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->sq.va)
 		return -EINVAL;
 	qp->sq.len = len;

@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->rq.max_cnt = max_rqe_allocated;
 	len = (hw_pages * hw_page_size);

-	qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->rq.va)
 		return -ENOMEM;
 	qp->rq.pa = pa;

@@ -2315,7 +2315,7 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 	if (dev->attr.ird == 0)
 		return 0;

-	qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
 					  GFP_KERNEL);
 	if (!qp->ird_q_va)
 		return -ENOMEM;

@@ -73,7 +73,7 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
 			sizeof(struct ocrdma_rdma_stats_resp));

-	mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
 				     &mem->pa, GFP_KERNEL);
 	if (!mem->va) {
 		pr_err("%s: stats mbox allocation failed\n", __func__);

@@ -504,7 +504,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);

-	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
 					    &ctx->ah_tbl.pa, GFP_KERNEL);
 	if (!ctx->ah_tbl.va) {
 		kfree(ctx);

@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
 		return -ENOMEM;

 	for (i = 0; i < mr->num_pbls; i++) {
-		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
 		if (!va) {
 			ocrdma_free_mr_pbl_tbl(dev, mr);
 			status = -ENOMEM;
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
 		return ERR_PTR(-ENOMEM);

 	for (i = 0; i < pbl_info->num_pbls; i++) {
-		va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
-					 &pa, flags);
+		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+					flags);
 		if (!va)
 			goto err;

@@ -890,7 +890,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "device version %d, driver version %d\n",
 		 dev->dsr_version, PVRDMA_VERSION);

-	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
 				      &dev->dsrbase, GFP_KERNEL);
 	if (!dev->dsr) {
 		dev_err(&pdev->dev, "failed to allocate shared region\n");

@@ -147,7 +147,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	ts->pdev = pdev;

-	ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
+	ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
 					    GFP_KERNEL);
 	if (!ts->fw_regs_va) {
 		dev_err(dev, "failed to dma_alloc_coherent\n");

@@ -232,8 +232,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)

 	spin_lock_init(&dom->pgtlock);

-	dom->pgt_va = dma_zalloc_coherent(data->dev,
-				M2701_IOMMU_PGT_SIZE,
+	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
 					 &dom->pgt_pa, GFP_KERNEL);
 	if (!dom->pgt_va)
 		return -ENOMEM;

@@ -218,7 +218,7 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
 {
 	struct device *dev = &cio2->pci_dev->dev;

-	q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
 				     GFP_KERNEL);
 	if (!q->fbpt)
 		return -ENOMEM;
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
 	struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
 	struct device *dev = &ctx->dev->plat_dev->dev;

-	mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
 	if (!mem->va) {
 		mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
 			     size);

@@ -218,7 +218,7 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
 	if (get_order(size) >= MAX_ORDER)
 		return NULL;

-	return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+	return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
 				  GFP_KERNEL);
 }

@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host)
 		 * Use zalloc to zero the reserved high 32-bits of 128-bit
 		 * descriptors so that they never need to be written.
 		 */
-		buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
-					 host->adma_table_sz, &dma, GFP_KERNEL);
+		buf = dma_alloc_coherent(mmc_dev(mmc),
+					 host->align_buffer_sz + host->adma_table_sz,
+					 &dma, GFP_KERNEL);
 		if (!buf) {
 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
 				mmc_hostname(mmc));
@@ -1433,7 +1433,7 @@ static int greth_of_probe(struct platform_device *ofdev)
 	}

 	/* Allocate TX descriptor ring in coherent memory */
-	greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
 					       &greth->tx_bd_base_phys,
 					       GFP_KERNEL);
 	if (!greth->tx_bd_base) {

@@ -1442,7 +1442,7 @@ static int greth_of_probe(struct platform_device *ofdev)
 	}

 	/* Allocate RX descriptor ring in coherent memory */
-	greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
 					       &greth->rx_bd_base_phys,
 					       GFP_KERNEL);
 	if (!greth->rx_bd_base) {

@@ -795,7 +795,7 @@ static int slic_init_stat_queue(struct slic_device *sdev)
 	size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;

 	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
-		descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
+		descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
 					   GFP_KERNEL);
 		if (!descs) {
 			netdev_err(sdev->netdev,

@@ -1240,7 +1240,7 @@ static int slic_init_shmem(struct slic_device *sdev)
 	struct slic_shmem_data *sm_data;
 	dma_addr_t paddr;

-	sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+	sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
 				     &paddr, GFP_KERNEL);
 	if (!sm_data) {
 		dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");

@@ -1621,7 +1621,7 @@ static int slic_read_eeprom(struct slic_device *sdev)
 	int err = 0;
 	u8 *mac[2];

-	eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+	eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
 				    &paddr, GFP_KERNEL);
 	if (!eeprom)
 		return -ENOMEM;
@@ -111,7 +111,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
 	struct ena_com_admin_sq *sq = &queue->sq;
 	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

-	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
 					 GFP_KERNEL);

 	if (!sq->entries) {

@@ -133,7 +133,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
 	struct ena_com_admin_cq *cq = &queue->cq;
 	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

-	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
 					 GFP_KERNEL);

 	if (!cq->entries) {

@@ -156,7 +156,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,

 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
-	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
 					   GFP_KERNEL);

 	if (!aenq->entries) {

@@ -344,13 +344,13 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		dev_node = dev_to_node(ena_dev->dmadev);
 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
 		io_sq->desc_addr.virt_addr =
-			dma_zalloc_coherent(ena_dev->dmadev, size,
+			dma_alloc_coherent(ena_dev->dmadev, size,
 					   &io_sq->desc_addr.phys_addr,
 					   GFP_KERNEL);
 		set_dev_node(ena_dev->dmadev, dev_node);
 		if (!io_sq->desc_addr.virt_addr) {
 			io_sq->desc_addr.virt_addr =
-				dma_zalloc_coherent(ena_dev->dmadev, size,
+				dma_alloc_coherent(ena_dev->dmadev, size,
 						   &io_sq->desc_addr.phys_addr,
 						   GFP_KERNEL);
 		}

@@ -425,12 +425,12 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 	prev_node = dev_to_node(ena_dev->dmadev);
 	set_dev_node(ena_dev->dmadev, ctx->numa_node);
 	io_cq->cdesc_addr.virt_addr =
-		dma_zalloc_coherent(ena_dev->dmadev, size,
+		dma_alloc_coherent(ena_dev->dmadev, size,
 				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
 	set_dev_node(ena_dev->dmadev, prev_node);
 	if (!io_cq->cdesc_addr.virt_addr) {
 		io_cq->cdesc_addr.virt_addr =
-			dma_zalloc_coherent(ena_dev->dmadev, size,
+			dma_alloc_coherent(ena_dev->dmadev, size,
 					   &io_cq->cdesc_addr.phys_addr,
 					   GFP_KERNEL);
 	}
			@ -1026,7 +1026,7 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 | 
			
		|||
	struct ena_rss *rss = &ena_dev->rss;
 | 
			
		||||
 | 
			
		||||
	rss->hash_key =
 | 
			
		||||
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
 | 
			
		||||
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
 | 
			
		||||
				   &rss->hash_key_dma_addr, GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
	if (unlikely(!rss->hash_key))
 | 
			
		||||
| 
						 | 
				
			
			@ -1050,7 +1050,7 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
 | 
			
		|||
	struct ena_rss *rss = &ena_dev->rss;
 | 
			
		||||
 | 
			
		||||
	rss->hash_ctrl =
 | 
			
		||||
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
 | 
			
		||||
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
 | 
			
		||||
				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
	if (unlikely(!rss->hash_ctrl))
 | 
			
		||||
| 
						 | 
				
			
			@ -1094,7 +1094,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
 | 
			
		|||
		sizeof(struct ena_admin_rss_ind_table_entry);
 | 
			
		||||
 | 
			
		||||
	rss->rss_ind_tbl =
 | 
			
		||||
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
 | 
			
		||||
		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
 | 
			
		||||
				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (unlikely(!rss->rss_ind_tbl))
 | 
			
		||||
		goto mem_err1;
 | 
			
		||||
| 
						 | 
				
			
			@ -1649,7 +1649,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 | 
			
		|||
 | 
			
		||||
	spin_lock_init(&mmio_read->lock);
 | 
			
		||||
	mmio_read->read_resp =
 | 
			
		||||
		dma_zalloc_coherent(ena_dev->dmadev,
 | 
			
		||||
		dma_alloc_coherent(ena_dev->dmadev,
 | 
			
		||||
				   sizeof(*mmio_read->read_resp),
 | 
			
		||||
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (unlikely(!mmio_read->read_resp))
 | 
			
		||||
| 
						 | 
				
			
			@ -2623,7 +2623,7 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
 | 
			
		|||
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 | 
			
		||||
 | 
			
		||||
	host_attr->host_info =
 | 
			
		||||
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
 | 
			
		||||
		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
 | 
			
		||||
				   &host_attr->host_info_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (unlikely(!host_attr->host_info))
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
 | 
			
		|||
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 | 
			
		||||
 | 
			
		||||
	host_attr->debug_area_virt_addr =
 | 
			
		||||
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
 | 
			
		||||
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
 | 
			
		||||
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
 | 
			
		||||
				   &host_attr->debug_area_dma_addr,
 | 
			
		||||
				   GFP_KERNEL);
 | 
			
		||||
	if (unlikely(!host_attr->debug_area_virt_addr)) {
 | 
			
		||||
		host_attr->debug_area_size = 0;
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -206,7 +206,7 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	/* Packet buffers should be 64B aligned */
 | 
			
		||||
	pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
 | 
			
		||||
	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
 | 
			
		||||
				     GFP_ATOMIC);
 | 
			
		||||
	if (unlikely(!pkt_buf)) {
 | 
			
		||||
		dev_kfree_skb_any(skb);
 | 
			
		||||
| 
						 | 
				
			
			@ -428,7 +428,7 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
 | 
			
		|||
	ring->ndev = ndev;
 | 
			
		||||
 | 
			
		||||
	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
 | 
			
		||||
	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
 | 
			
		||||
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
 | 
			
		||||
					     GFP_KERNEL);
 | 
			
		||||
	if (!ring->desc_addr)
 | 
			
		||||
		goto err;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
 | 
			
		|||
			    alx->num_txq +
 | 
			
		||||
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
 | 
			
		||||
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
 | 
			
		||||
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
 | 
			
		||||
	alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
 | 
			
		||||
					       alx->descmem.size,
 | 
			
		||||
						&alx->descmem.dma,
 | 
			
		||||
						GFP_KERNEL);
 | 
			
		||||
					       &alx->descmem.dma, GFP_KERNEL);
 | 
			
		||||
	if (!alx->descmem.virt)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1019,7 +1019,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
 | 
			
		|||
		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
 | 
			
		||||
		8 * 4;
 | 
			
		||||
 | 
			
		||||
	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
 | 
			
		||||
	ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
 | 
			
		||||
					       &ring_header->dma, GFP_KERNEL);
 | 
			
		||||
	if (unlikely(!ring_header->desc)) {
 | 
			
		||||
		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)
 | 
			
		|||
 | 
			
		||||
	/* allocate rx dma ring */
 | 
			
		||||
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
 | 
			
		||||
	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	if (!p) {
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
		goto out_freeirq_tx;
 | 
			
		||||
| 
						 | 
				
			
			@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)
 | 
			
		|||
 | 
			
		||||
	/* allocate tx dma ring */
 | 
			
		||||
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
 | 
			
		||||
	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	if (!p) {
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
		goto out_free_rx_ring;
 | 
			
		||||
| 
						 | 
				
			
			@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)
 | 
			
		|||
 | 
			
		||||
	/* allocate rx dma ring */
 | 
			
		||||
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
 | 
			
		||||
	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	if (!p) {
 | 
			
		||||
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)
 | 
			
		|||
 | 
			
		||||
	/* allocate tx dma ring */
 | 
			
		||||
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
 | 
			
		||||
	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 | 
			
		||||
	if (!p) {
 | 
			
		||||
		dev_err(kdev, "cannot allocate tx ring\n");
 | 
			
		||||
		ret = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1506,7 +1506,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 | 
			
		|||
	/* We just need one DMA descriptor which is DMA-able, since writing to
 | 
			
		||||
	 * the port will allocate a new descriptor in its internal linked-list
 | 
			
		||||
	 */
 | 
			
		||||
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
 | 
			
		||||
	p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
 | 
			
		||||
			       GFP_KERNEL);
 | 
			
		||||
	if (!p) {
 | 
			
		||||
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -634,7 +634,7 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
 | 
			
		|||
 | 
			
		||||
		/* Alloc ring of descriptors */
 | 
			
		||||
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
 | 
			
		||||
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
 | 
			
		||||
		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
 | 
			
		||||
						    &ring->dma_base,
 | 
			
		||||
						    GFP_KERNEL);
 | 
			
		||||
		if (!ring->cpu_base) {
 | 
			
		||||
| 
						 | 
				
			
			@ -659,7 +659,7 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
 | 
			
		|||
 | 
			
		||||
		/* Alloc ring of descriptors */
 | 
			
		||||
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
 | 
			
		||||
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
 | 
			
		||||
		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
 | 
			
		||||
						    &ring->dma_base,
 | 
			
		||||
						    GFP_KERNEL);
 | 
			
		||||
		if (!ring->cpu_base) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -844,7 +844,7 @@ bnx2_alloc_stats_blk(struct net_device *dev)
 | 
			
		|||
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
 | 
			
		||||
	bp->status_stats_size = status_blk_size +
 | 
			
		||||
				sizeof(struct statistics_block);
 | 
			
		||||
	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
 | 
			
		||||
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
 | 
			
		||||
					&bp->status_blk_mapping, GFP_KERNEL);
 | 
			
		||||
	if (!status_blk)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -3449,7 +3449,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
 | 
			
		|||
			goto alloc_tx_ext_stats;
 | 
			
		||||
 | 
			
		||||
		bp->hw_rx_port_stats_ext =
 | 
			
		||||
			dma_zalloc_coherent(&pdev->dev,
 | 
			
		||||
			dma_alloc_coherent(&pdev->dev,
 | 
			
		||||
					   sizeof(struct rx_port_stats_ext),
 | 
			
		||||
					   &bp->hw_rx_port_stats_ext_map,
 | 
			
		||||
					   GFP_KERNEL);
 | 
			
		||||
| 
						 | 
				
			
			@ -3462,7 +3462,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
 | 
			
		|||
 | 
			
		||||
		if (bp->hwrm_spec_code >= 0x10902) {
 | 
			
		||||
			bp->hw_tx_port_stats_ext =
 | 
			
		||||
				dma_zalloc_coherent(&pdev->dev,
 | 
			
		||||
				dma_alloc_coherent(&pdev->dev,
 | 
			
		||||
						   sizeof(struct tx_port_stats_ext),
 | 
			
		||||
						   &bp->hw_tx_port_stats_ext_map,
 | 
			
		||||
						   GFP_KERNEL);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -316,7 +316,7 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 | 
			
		|||
 | 
			
		||||
	n = IEEE_8021QAZ_MAX_TCS;
 | 
			
		||||
	data_len = sizeof(*data) + sizeof(*fw_app) * n;
 | 
			
		||||
	data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
 | 
			
		||||
	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
 | 
			
		||||
				  GFP_KERNEL);
 | 
			
		||||
	if (!data)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -85,7 +85,7 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 | 
			
		|||
		return -EFAULT;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
 | 
			
		||||
	data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
 | 
			
		||||
				       &data_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (!data_addr)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -8712,7 +8712,7 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
 | 
			
		|||
		if (!i && tg3_flag(tp, ENABLE_RSS))
 | 
			
		||||
			continue;
 | 
			
		||||
 | 
			
		||||
		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
 | 
			
		||||
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
 | 
			
		||||
						   TG3_RX_RCB_RING_BYTES(tp),
 | 
			
		||||
						   &tnapi->rx_rcb_mapping,
 | 
			
		||||
						   GFP_KERNEL);
 | 
			
		||||
| 
						 | 
				
			
			@ -8768,7 +8768,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 | 
			
		|||
{
 | 
			
		||||
	int i;
 | 
			
		||||
 | 
			
		||||
	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
 | 
			
		||||
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
 | 
			
		||||
					  sizeof(struct tg3_hw_stats),
 | 
			
		||||
					  &tp->stats_mapping, GFP_KERNEL);
 | 
			
		||||
	if (!tp->hw_stats)
 | 
			
		||||
| 
						 | 
				
			
			@ -8778,7 +8778,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 | 
			
		|||
		struct tg3_napi *tnapi = &tp->napi[i];
 | 
			
		||||
		struct tg3_hw_status *sblk;
 | 
			
		||||
 | 
			
		||||
		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
 | 
			
		||||
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
 | 
			
		||||
						      TG3_HW_STATUS_SIZE,
 | 
			
		||||
						      &tnapi->status_mapping,
 | 
			
		||||
						      GFP_KERNEL);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
 | 
			
		|||
	dmem->q_len = q_len;
 | 
			
		||||
	dmem->size = (desc_size * q_len) + align_bytes;
 | 
			
		||||
	/* Save address, need it while freeing */
 | 
			
		||||
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
 | 
			
		||||
	dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
 | 
			
		||||
						&dmem->dma, GFP_KERNEL);
 | 
			
		||||
	if (!dmem->unalign_base)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
 | 
			
		|||
{
 | 
			
		||||
	size_t len = nelem * elem_size;
 | 
			
		||||
	void *s = NULL;
 | 
			
		||||
	void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
 | 
			
		||||
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
	if (!p)
 | 
			
		||||
		return NULL;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
 | 
			
		|||
{
 | 
			
		||||
	size_t len = nelem * elem_size + stat_size;
 | 
			
		||||
	void *s = NULL;
 | 
			
		||||
	void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
 | 
			
		||||
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
	if (!p)
 | 
			
		||||
		return NULL;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
 | 
			
		|||
	 * Allocate the hardware ring and PCI DMA bus address space for said.
 | 
			
		||||
	 */
 | 
			
		||||
	size_t hwlen = nelem * hwsize + stat_size;
 | 
			
		||||
	void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
 | 
			
		||||
	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
	if (!hwring)
 | 
			
		||||
		return NULL;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1808,7 +1808,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
 | 
			
		|||
	total_size = buf_len;
 | 
			
		||||
 | 
			
		||||
	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
 | 
			
		||||
	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
					    get_fat_cmd.size,
 | 
			
		||||
					    &get_fat_cmd.dma, GFP_ATOMIC);
 | 
			
		||||
	if (!get_fat_cmd.va)
 | 
			
		||||
| 
						 | 
				
			
			@ -2302,7 +2302,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
 | 
			
		|||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_resp_port_type);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_ATOMIC);
 | 
			
		||||
	if (!cmd.va) {
 | 
			
		||||
		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
 | 
			
		|||
 | 
			
		||||
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
 | 
			
		||||
				+ LANCER_FW_DOWNLOAD_CHUNK;
 | 
			
		||||
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
 | 
			
		||||
					   &flash_cmd.dma, GFP_KERNEL);
 | 
			
		||||
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
 | 
			
		||||
					  GFP_KERNEL);
 | 
			
		||||
	if (!flash_cmd.va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -3184,7 +3184,7 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
 | 
			
		||||
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
 | 
			
		||||
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
 | 
			
		||||
					  GFP_KERNEL);
 | 
			
		||||
	if (!flash_cmd.va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -3435,7 +3435,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 | 
			
		|||
		goto err;
 | 
			
		||||
	}
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_ATOMIC);
 | 
			
		||||
	if (!cmd.va) {
 | 
			
		||||
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -3522,7 +3522,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 | 
			
		|||
 | 
			
		||||
	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
 | 
			
		||||
	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
					    attribs_cmd.size,
 | 
			
		||||
					    &attribs_cmd.dma, GFP_ATOMIC);
 | 
			
		||||
	if (!attribs_cmd.va) {
 | 
			
		||||
| 
						 | 
				
			
			@ -3699,7 +3699,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 | 
			
		|||
 | 
			
		||||
	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
 | 
			
		||||
	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
						 get_mac_list_cmd.size,
 | 
			
		||||
						 &get_mac_list_cmd.dma,
 | 
			
		||||
						 GFP_ATOMIC);
 | 
			
		||||
| 
						 | 
				
			
			@ -3829,7 +3829,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 | 
			
		|||
 | 
			
		||||
	memset(&cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_KERNEL);
 | 
			
		||||
	if (!cmd.va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -4035,7 +4035,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 | 
			
		|||
 | 
			
		||||
	memset(&cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_ATOMIC);
 | 
			
		||||
	if (!cmd.va) {
 | 
			
		||||
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -4089,7 +4089,7 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 | 
			
		|||
 | 
			
		||||
	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
 | 
			
		||||
	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
					   extfat_cmd.size, &extfat_cmd.dma,
 | 
			
		||||
					   GFP_ATOMIC);
 | 
			
		||||
	if (!extfat_cmd.va)
 | 
			
		||||
| 
						 | 
				
			
			@ -4127,7 +4127,7 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 | 
			
		|||
 | 
			
		||||
	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
 | 
			
		||||
	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
					   extfat_cmd.size, &extfat_cmd.dma,
 | 
			
		||||
					   GFP_ATOMIC);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -4354,7 +4354,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 | 
			
		|||
 | 
			
		||||
	memset(&cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_ATOMIC);
 | 
			
		||||
	if (!cmd.va) {
 | 
			
		||||
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -4452,7 +4452,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 | 
			
		|||
 | 
			
		||||
	memset(&cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_ATOMIC);
 | 
			
		||||
	if (!cmd.va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -4539,7 +4539,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 | 
			
		|||
 | 
			
		||||
	memset(&cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
 | 
			
		||||
				    GFP_ATOMIC);
 | 
			
		||||
	if (!cmd.va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -274,7 +274,7 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 | 
			
		|||
	int status = 0;
 | 
			
		||||
 | 
			
		||||
	read_cmd.size = LANCER_READ_FILE_CHUNK;
 | 
			
		||||
	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
 | 
			
		||||
	read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
 | 
			
		||||
					 &read_cmd.dma, GFP_ATOMIC);
 | 
			
		||||
 | 
			
		||||
	if (!read_cmd.va) {
 | 
			
		||||
| 
						 | 
				
			
			@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
 | 
			
		||||
	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 | 
			
		||||
	cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 | 
			
		||||
	if (!cmd.va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -851,7 +851,7 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
 | 
			
		|||
	};
 | 
			
		||||
 | 
			
		||||
	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
 | 
			
		||||
	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
					   ddrdma_cmd.size, &ddrdma_cmd.dma,
 | 
			
		||||
					   GFP_KERNEL);
 | 
			
		||||
	if (!ddrdma_cmd.va)
 | 
			
		||||
| 
						 | 
				
			
			@ -1014,7 +1014,7 @@ static int be_read_eeprom(struct net_device *netdev,
 | 
			
		|||
 | 
			
		||||
	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
 | 
			
		||||
	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
 | 
			
		||||
	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
 | 
			
		||||
					   eeprom_cmd.size, &eeprom_cmd.dma,
 | 
			
		||||
					   GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 | 
			
		|||
	q->len = len;
 | 
			
		||||
	q->entry_size = entry_size;
 | 
			
		||||
	mem->size = len * entry_size;
 | 
			
		||||
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
 | 
			
		||||
				      GFP_KERNEL);
 | 
			
		||||
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
 | 
			
		||||
				     &mem->dma, GFP_KERNEL);
 | 
			
		||||
	if (!mem->va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			@ -5766,7 +5766,7 @@ static int be_drv_init(struct be_adapter *adapter)
 | 
			
		|||
	int status = 0;
 | 
			
		||||
 | 
			
		||||
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
 | 
			
		||||
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
 | 
			
		||||
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
 | 
			
		||||
						&mbox_mem_alloc->dma,
 | 
			
		||||
						GFP_KERNEL);
 | 
			
		||||
	if (!mbox_mem_alloc->va)
 | 
			
		||||
| 
						 | 
				
			
			@ -5777,7 +5777,7 @@ static int be_drv_init(struct be_adapter *adapter)
 | 
			
		|||
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 | 
			
		||||
 | 
			
		||||
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
 | 
			
		||||
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
 | 
			
		||||
	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
 | 
			
		||||
					   &rx_filter->dma, GFP_KERNEL);
 | 
			
		||||
	if (!rx_filter->va) {
 | 
			
		||||
		status = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -5792,7 +5792,7 @@ static int be_drv_init(struct be_adapter *adapter)
 | 
			
		|||
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 | 
			
		||||
	else
 | 
			
		||||
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
 | 
			
		||||
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
 | 
			
		||||
	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
 | 
			
		||||
					   &stats_cmd->dma, GFP_KERNEL);
 | 
			
		||||
	if (!stats_cmd->va) {
 | 
			
		||||
		status = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -935,15 +935,13 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
 | 
			
		|||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
	/* Allocate descriptors */
 | 
			
		||||
	priv->rxdes = dma_zalloc_coherent(priv->dev,
 | 
			
		||||
					  MAX_RX_QUEUE_ENTRIES *
 | 
			
		||||
					  sizeof(struct ftgmac100_rxdes),
 | 
			
		||||
	priv->rxdes = dma_alloc_coherent(priv->dev,
 | 
			
		||||
					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
 | 
			
		||||
					 &priv->rxdes_dma, GFP_KERNEL);
 | 
			
		||||
	if (!priv->rxdes)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
	priv->txdes = dma_zalloc_coherent(priv->dev,
 | 
			
		||||
					  MAX_TX_QUEUE_ENTRIES *
 | 
			
		||||
					  sizeof(struct ftgmac100_txdes),
 | 
			
		||||
	priv->txdes = dma_alloc_coherent(priv->dev,
 | 
			
		||||
					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
 | 
			
		||||
					 &priv->txdes_dma, GFP_KERNEL);
 | 
			
		||||
	if (!priv->txdes)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
 | 
			
		|||
{
 | 
			
		||||
	int i;
 | 
			
		||||
 | 
			
		||||
	priv->descs = dma_zalloc_coherent(priv->dev,
 | 
			
		||||
	priv->descs = dma_alloc_coherent(priv->dev,
 | 
			
		||||
					 sizeof(struct ftmac100_descs),
 | 
			
		||||
					  &priv->descs_dma_addr,
 | 
			
		||||
					  GFP_KERNEL);
 | 
			
		||||
					 &priv->descs_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (!priv->descs)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1006,7 +1006,7 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
 | 
			
		|||
 | 
			
		||||
	for (i = 0; i < QUEUE_NUMS; i++) {
 | 
			
		||||
		size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
 | 
			
		||||
		virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
 | 
			
		||||
		virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
 | 
			
		||||
					       GFP_KERNEL);
 | 
			
		||||
		if (virt_addr == NULL)
 | 
			
		||||
			goto error_free_pool;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 | 
			
		|||
{
 | 
			
		||||
	int size = ring->desc_num * sizeof(ring->desc[0]);
 | 
			
		||||
 | 
			
		||||
	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
 | 
			
		||||
					 &ring->desc_dma_addr,
 | 
			
		||||
					 GFP_KERNEL);
 | 
			
		||||
	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
 | 
			
		||||
					&ring->desc_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (!ring->desc)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
 | 
			
		|||
{
 | 
			
		||||
	int size  = ring->desc_num * sizeof(struct hclge_desc);
 | 
			
		||||
 | 
			
		||||
	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
 | 
			
		||||
					 size, &ring->desc_dma_addr,
 | 
			
		||||
					 GFP_KERNEL);
 | 
			
		||||
	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
 | 
			
		||||
					&ring->desc_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (!ring->desc)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 | 
			
		|||
{
 | 
			
		||||
	int size = ring->desc_num * sizeof(struct hclgevf_desc);
 | 
			
		||||
 | 
			
		||||
	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
 | 
			
		||||
					 size, &ring->desc_dma_addr,
 | 
			
		||||
					 GFP_KERNEL);
 | 
			
		||||
	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
 | 
			
		||||
					&ring->desc_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (!ring->desc)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -613,7 +613,7 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
 | 
			
		|||
	u8 *cmd_vaddr;
 | 
			
		||||
	int err = 0;
 | 
			
		||||
 | 
			
		||||
	cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
 | 
			
		||||
	cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
 | 
			
		||||
				       &cmd_paddr, GFP_KERNEL);
 | 
			
		||||
	if (!cmd_vaddr) {
 | 
			
		||||
		dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
 | 
			
		|||
	dma_addr_t node_paddr;
 | 
			
		||||
	int err;
 | 
			
		||||
 | 
			
		||||
	node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
 | 
			
		||||
				   &node_paddr, GFP_KERNEL);
 | 
			
		||||
	node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
 | 
			
		||||
				  GFP_KERNEL);
 | 
			
		||||
	if (!node) {
 | 
			
		||||
		dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -821,7 +821,7 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
 | 
			
		|||
	if (!chain->cell_ctxt)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
	chain->wb_status = dma_zalloc_coherent(&pdev->dev,
 | 
			
		||||
	chain->wb_status = dma_alloc_coherent(&pdev->dev,
 | 
			
		||||
					      sizeof(*chain->wb_status),
 | 
			
		||||
					      &chain->wb_status_paddr,
 | 
			
		||||
					      GFP_KERNEL);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -593,7 +593,7 @@ static int alloc_eq_pages(struct hinic_eq *eq)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	for (pg = 0; pg < eq->num_pages; pg++) {
 | 
			
		||||
		eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
 | 
			
		||||
		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
 | 
			
		||||
						       eq->page_size,
 | 
			
		||||
						       &eq->dma_addr[pg],
 | 
			
		||||
						       GFP_KERNEL);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -355,7 +355,7 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
 | 
			
		|||
		goto err_sq_db;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
 | 
			
		||||
	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
 | 
			
		||||
					  &func_to_io->ci_dma_base,
 | 
			
		||||
					  GFP_KERNEL);
 | 
			
		||||
	if (!ci_addr_base) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -336,7 +336,7 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
 | 
			
		|||
		goto err_cqe_dma_arr_alloc;
 | 
			
		||||
 | 
			
		||||
	for (i = 0; i < wq->q_depth; i++) {
 | 
			
		||||
		rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
 | 
			
		||||
		rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
 | 
			
		||||
						sizeof(*rq->cqe[i]),
 | 
			
		||||
						&rq->cqe_dma[i], GFP_KERNEL);
 | 
			
		||||
		if (!rq->cqe[i])
 | 
			
		||||
| 
						 | 
				
			
			@ -415,7 +415,7 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
 | 
			
		|||
 | 
			
		||||
	/* HW requirements: Must be at least 32 bit */
 | 
			
		||||
	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
 | 
			
		||||
	rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
 | 
			
		||||
	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
 | 
			
		||||
					      &rq->pi_dma_addr, GFP_KERNEL);
 | 
			
		||||
	if (!rq->pi_virt_addr) {
 | 
			
		||||
		dev_err(&pdev->dev, "Failed to allocate PI address\n");
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -114,7 +114,7 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
 | 
			
		|||
	struct pci_dev *pdev = hwif->pdev;
 | 
			
		||||
	dma_addr_t dma_addr;
 | 
			
		||||
 | 
			
		||||
	*vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
 | 
			
		||||
	*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
 | 
			
		||||
				    GFP_KERNEL);
 | 
			
		||||
	if (!*vaddr) {
 | 
			
		||||
		dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -482,7 +482,7 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
 | 
			
		|||
		u64 *paddr = &wq->block_vaddr[i];
 | 
			
		||||
		dma_addr_t dma_addr;
 | 
			
		||||
 | 
			
		||||
		*vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
 | 
			
		||||
		*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
 | 
			
		||||
					    &dma_addr, GFP_KERNEL);
 | 
			
		||||
		if (!*vaddr) {
 | 
			
		||||
			dev_err(&pdev->dev, "Failed to allocate wq page\n");
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -636,7 +636,7 @@ static int mal_probe(struct platform_device *ofdev)
 | 
			
		|||
	bd_size = sizeof(struct mal_descriptor) *
 | 
			
		||||
		(NUM_TX_BUFF * mal->num_tx_chans +
 | 
			
		||||
		 NUM_RX_BUFF * mal->num_rx_chans);
 | 
			
		||||
	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
 | 
			
		||||
	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
 | 
			
		||||
					  GFP_KERNEL);
 | 
			
		||||
	if (mal->bd_virt == NULL) {
 | 
			
		||||
		err = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -993,7 +993,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 | 
			
		|||
 | 
			
		||||
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 | 
			
		||||
	txdr->size = ALIGN(txdr->size, 4096);
 | 
			
		||||
	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
 | 
			
		||||
	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
 | 
			
		||||
					GFP_KERNEL);
 | 
			
		||||
	if (!txdr->desc) {
 | 
			
		||||
		ret_val = 2;
 | 
			
		||||
| 
						 | 
				
			
			@ -1051,7 +1051,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
 | 
			
		||||
	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 | 
			
		||||
	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 | 
			
		||||
					GFP_KERNEL);
 | 
			
		||||
	if (!rxdr->desc) {
 | 
			
		||||
		ret_val = 6;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2305,7 +2305,7 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 | 
			
		|||
{
 | 
			
		||||
	struct pci_dev *pdev = adapter->pdev;
 | 
			
		||||
 | 
			
		||||
	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
 | 
			
		||||
	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
 | 
			
		||||
					GFP_KERNEL);
 | 
			
		||||
	if (!ring->desc)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
 | 
			
		|||
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
 | 
			
		||||
 | 
			
		||||
	mem->size = ALIGN(size, alignment);
 | 
			
		||||
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
 | 
			
		||||
				      &mem->pa, GFP_KERNEL);
 | 
			
		||||
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
 | 
			
		||||
				     GFP_KERNEL);
 | 
			
		||||
	if (!mem->va)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -680,7 +680,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 | 
			
		|||
	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
 | 
			
		||||
	txdr->size = ALIGN(txdr->size, 4096);
 | 
			
		||||
 | 
			
		||||
	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
 | 
			
		||||
	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
 | 
			
		||||
					GFP_KERNEL);
 | 
			
		||||
	if (!txdr->desc) {
 | 
			
		||||
		vfree(txdr->buffer_info);
 | 
			
		||||
| 
						 | 
				
			
			@ -763,7 +763,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 | 
			
		|||
	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
 | 
			
		||||
	rxdr->size = ALIGN(rxdr->size, 4096);
 | 
			
		||||
 | 
			
		||||
	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 | 
			
		||||
	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 | 
			
		||||
					GFP_KERNEL);
 | 
			
		||||
 | 
			
		||||
	if (!rxdr->desc) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2044,7 +2044,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
 | 
			
		|||
	u32 txq_dma;
 | 
			
		||||
 | 
			
		||||
	/* Allocate memory for TX descriptors */
 | 
			
		||||
	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
 | 
			
		||||
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
 | 
			
		||||
					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
 | 
			
		||||
					     &aggr_txq->descs_dma, GFP_KERNEL);
 | 
			
		||||
	if (!aggr_txq->descs)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -557,7 +557,7 @@ static int init_hash_table(struct pxa168_eth_private *pep)
 | 
			
		|||
	 * table is full.
 | 
			
		||||
	 */
 | 
			
		||||
	if (!pep->htpr) {
 | 
			
		||||
		pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
 | 
			
		||||
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
 | 
			
		||||
					       HASH_ADDR_TABLE_SIZE,
 | 
			
		||||
					       &pep->htpr_dma, GFP_KERNEL);
 | 
			
		||||
		if (!pep->htpr)
 | 
			
		||||
| 
						 | 
				
			
			@ -1044,7 +1044,7 @@ static int rxq_init(struct net_device *dev)
 | 
			
		|||
	pep->rx_desc_count = 0;
 | 
			
		||||
	size = pep->rx_ring_size * sizeof(struct rx_desc);
 | 
			
		||||
	pep->rx_desc_area_size = size;
 | 
			
		||||
	pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
 | 
			
		||||
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
 | 
			
		||||
						 &pep->rx_desc_dma,
 | 
			
		||||
						 GFP_KERNEL);
 | 
			
		||||
	if (!pep->p_rx_desc_area)
 | 
			
		||||
| 
						 | 
				
			
			@ -1103,7 +1103,7 @@ static int txq_init(struct net_device *dev)
 | 
			
		|||
	pep->tx_desc_count = 0;
 | 
			
		||||
	size = pep->tx_ring_size * sizeof(struct tx_desc);
 | 
			
		||||
	pep->tx_desc_area_size = size;
 | 
			
		||||
	pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
 | 
			
		||||
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
 | 
			
		||||
						 &pep->tx_desc_dma,
 | 
			
		||||
						 GFP_KERNEL);
 | 
			
		||||
	if (!pep->p_tx_desc_area)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -598,7 +598,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 | 
			
		|||
	dma_addr_t dma_addr;
 | 
			
		||||
	int i;
 | 
			
		||||
 | 
			
		||||
	eth->scratch_ring = dma_zalloc_coherent(eth->dev,
 | 
			
		||||
	eth->scratch_ring = dma_alloc_coherent(eth->dev,
 | 
			
		||||
					       cnt * sizeof(struct mtk_tx_dma),
 | 
			
		||||
					       ð->phy_scratch_ring,
 | 
			
		||||
					       GFP_ATOMIC);
 | 
			
		||||
| 
						 | 
				
			
			@ -1213,7 +1213,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 | 
			
		|||
	if (!ring->buf)
 | 
			
		||||
		goto no_tx_mem;
 | 
			
		||||
 | 
			
		||||
	ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
 | 
			
		||||
	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
 | 
			
		||||
				       &ring->phys, GFP_ATOMIC);
 | 
			
		||||
	if (!ring->dma)
 | 
			
		||||
		goto no_tx_mem;
 | 
			
		||||
| 
						 | 
				
			
			@ -1310,7 +1310,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 | 
			
		|||
			return -ENOMEM;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ring->dma = dma_zalloc_coherent(eth->dev,
 | 
			
		||||
	ring->dma = dma_alloc_coherent(eth->dev,
 | 
			
		||||
				       rx_dma_size * sizeof(*ring->dma),
 | 
			
		||||
				       &ring->phys, GFP_ATOMIC);
 | 
			
		||||
	if (!ring->dma)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
 | 
			
		|||
	buf->npages       = 1;
 | 
			
		||||
	buf->page_shift   = get_order(size) + PAGE_SHIFT;
 | 
			
		||||
	buf->direct.buf   =
 | 
			
		||||
		dma_zalloc_coherent(&dev->persist->pdev->dev,
 | 
			
		||||
				    size, &t, GFP_KERNEL);
 | 
			
		||||
		dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
 | 
			
		||||
				   GFP_KERNEL);
 | 
			
		||||
	if (!buf->direct.buf)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -624,7 +624,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 | 
			
		|||
 | 
			
		||||
		for (i = 0; i < buf->nbufs; ++i) {
 | 
			
		||||
			buf->page_list[i].buf =
 | 
			
		||||
				dma_zalloc_coherent(&dev->persist->pdev->dev,
 | 
			
		||||
				dma_alloc_coherent(&dev->persist->pdev->dev,
 | 
			
		||||
						   PAGE_SIZE, &t, GFP_KERNEL);
 | 
			
		||||
			if (!buf->page_list[i].buf)
 | 
			
		||||
				goto err_free;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
 | 
			
		|||
	mutex_lock(&priv->alloc_mutex);
 | 
			
		||||
	original_node = dev_to_node(&dev->pdev->dev);
 | 
			
		||||
	set_dev_node(&dev->pdev->dev, node);
 | 
			
		||||
	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
 | 
			
		||||
					 dma_handle, GFP_KERNEL);
 | 
			
		||||
	cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
 | 
			
		||||
					GFP_KERNEL);
 | 
			
		||||
	set_dev_node(&dev->pdev->dev, original_node);
 | 
			
		||||
	mutex_unlock(&priv->alloc_mutex);
 | 
			
		||||
	return cpu_handle;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1789,7 +1789,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 | 
			
		|||
{
 | 
			
		||||
	struct device *ddev = &dev->pdev->dev;
 | 
			
		||||
 | 
			
		||||
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
 | 
			
		||||
	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
 | 
			
		||||
						&cmd->alloc_dma, GFP_KERNEL);
 | 
			
		||||
	if (!cmd->cmd_alloc_buf)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -1804,7 +1804,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 | 
			
		|||
 | 
			
		||||
	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
 | 
			
		||||
			  cmd->alloc_dma);
 | 
			
		||||
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
 | 
			
		||||
	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
 | 
			
		||||
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
 | 
			
		||||
						&cmd->alloc_dma, GFP_KERNEL);
 | 
			
		||||
	if (!cmd->cmd_alloc_buf)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -3604,7 +3604,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
 | 
			
		|||
	for (i = 0; i < mgp->num_slices; i++) {
 | 
			
		||||
		ss = &mgp->ss[i];
 | 
			
		||||
		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
 | 
			
		||||
		ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
 | 
			
		||||
		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
 | 
			
		||||
						       &ss->rx_done.bus,
 | 
			
		||||
						       GFP_KERNEL);
 | 
			
		||||
		if (ss->rx_done.entry == NULL)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2170,7 +2170,7 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 | 
			
		|||
	tx_ring->cnt = dp->txd_cnt;
 | 
			
		||||
 | 
			
		||||
	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
 | 
			
		||||
	tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
 | 
			
		||||
	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
 | 
			
		||||
					   &tx_ring->dma,
 | 
			
		||||
					   GFP_KERNEL | __GFP_NOWARN);
 | 
			
		||||
	if (!tx_ring->txds) {
 | 
			
		||||
| 
						 | 
				
			
			@ -2328,7 +2328,7 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 | 
			
		|||
 | 
			
		||||
	rx_ring->cnt = dp->rxd_cnt;
 | 
			
		||||
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
 | 
			
		||||
	rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
 | 
			
		||||
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
 | 
			
		||||
					   &rx_ring->dma,
 | 
			
		||||
					   GFP_KERNEL | __GFP_NOWARN);
 | 
			
		||||
	if (!rx_ring->rxds) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -287,7 +287,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
 | 
			
		|||
	priv->rx_bd_ci = 0;
 | 
			
		||||
 | 
			
		||||
	/* Allocate the Tx and Rx buffer descriptors. */
 | 
			
		||||
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
 | 
			
		||||
	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 | 
			
		||||
					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
 | 
			
		||||
					   &priv->tx_bd_p, GFP_KERNEL);
 | 
			
		||||
	if (!priv->tx_bd_v)
 | 
			
		||||
| 
						 | 
				
			
			@ -299,7 +299,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
 | 
			
		|||
	if (!priv->tx_skb)
 | 
			
		||||
		goto out;
 | 
			
		||||
 | 
			
		||||
	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
 | 
			
		||||
	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 | 
			
		||||
					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
 | 
			
		||||
					   &priv->rx_bd_p, GFP_KERNEL);
 | 
			
		||||
	if (!priv->rx_bd_v)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1440,7 +1440,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 | 
			
		|||
 | 
			
		||||
	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
 | 
			
		||||
	rx_ring->rx_buff_pool =
 | 
			
		||||
		dma_zalloc_coherent(&pdev->dev, size,
 | 
			
		||||
		dma_alloc_coherent(&pdev->dev, size,
 | 
			
		||||
				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
 | 
			
		||||
	if (!rx_ring->rx_buff_pool)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -1755,7 +1755,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 | 
			
		|||
 | 
			
		||||
	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 | 
			
		||||
 | 
			
		||||
	tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
 | 
			
		||||
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
 | 
			
		||||
					   &tx_ring->dma, GFP_KERNEL);
 | 
			
		||||
	if (!tx_ring->desc) {
 | 
			
		||||
		vfree(tx_ring->buffer_info);
 | 
			
		||||
| 
						 | 
				
			
			@ -1798,7 +1798,7 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 | 
			
		|||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
 | 
			
		||||
	rx_ring->desc =	dma_zalloc_coherent(&pdev->dev, rx_ring->size,
 | 
			
		||||
	rx_ring->desc =	dma_alloc_coherent(&pdev->dev, rx_ring->size,
 | 
			
		||||
						  &rx_ring->dma, GFP_KERNEL);
 | 
			
		||||
	if (!rx_ring->desc) {
 | 
			
		||||
		vfree(rx_ring->buffer_info);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -401,7 +401,7 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
 | 
			
		|||
	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
 | 
			
		||||
		goto out_ring_desc;
 | 
			
		||||
 | 
			
		||||
	ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
 | 
			
		||||
	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
 | 
			
		||||
					   RX_RING_SIZE * sizeof(u64),
 | 
			
		||||
					   &ring->buf_dma, GFP_KERNEL);
 | 
			
		||||
	if (!ring->buffers)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -936,8 +936,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 | 
			
		|||
		u32 size = min_t(u32, total_size, psz);
 | 
			
		||||
		void **p_virt = &p_mngr->t2[i].p_virt;
 | 
			
		||||
 | 
			
		||||
		*p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
 | 
			
		||||
					      size, &p_mngr->t2[i].p_phys,
 | 
			
		||||
		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
 | 
			
		||||
					     &p_mngr->t2[i].p_phys,
 | 
			
		||||
					     GFP_KERNEL);
 | 
			
		||||
		if (!p_mngr->t2[i].p_virt) {
 | 
			
		||||
			rc = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -1054,7 +1054,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 | 
			
		|||
		u32 size;
 | 
			
		||||
 | 
			
		||||
		size = min_t(u32, sz_left, p_blk->real_size_in_page);
 | 
			
		||||
		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
 | 
			
		||||
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
 | 
			
		||||
					    &p_phys, GFP_KERNEL);
 | 
			
		||||
		if (!p_virt)
 | 
			
		||||
			return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -2306,7 +2306,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 | 
			
		|||
		goto out0;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
 | 
			
		||||
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 | 
			
		||||
				    p_blk->real_size_in_page, &p_phys,
 | 
			
		||||
				    GFP_KERNEL);
 | 
			
		||||
	if (!p_virt) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -434,13 +434,13 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
 | 
			
		|||
	*(tx_ring->hw_consumer) = 0;
 | 
			
		||||
 | 
			
		||||
	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
 | 
			
		||||
	rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
 | 
			
		||||
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
 | 
			
		||||
				     &rq_phys_addr, GFP_KERNEL);
 | 
			
		||||
	if (!rq_addr)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
 | 
			
		||||
	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
 | 
			
		||||
	rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
 | 
			
		||||
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
 | 
			
		||||
				      &rsp_phys_addr, GFP_KERNEL);
 | 
			
		||||
	if (!rsp_addr) {
 | 
			
		||||
		err = -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
			@ -855,7 +855,7 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
 | 
			
		|||
	struct qlcnic_cmd_args cmd;
 | 
			
		||||
	size_t  nic_size = sizeof(struct qlcnic_info_le);
 | 
			
		||||
 | 
			
		||||
	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
 | 
			
		||||
	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
 | 
			
		||||
					   &nic_dma_t, GFP_KERNEL);
 | 
			
		||||
	if (!nic_info_addr)
 | 
			
		||||
		return -ENOMEM;
 | 
			
		||||
| 
						 | 
				
			
@ -909,7 +909,7 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

@ -964,7 +964,7 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
	void *pci_info_addr;
	int err = 0, i;

	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
					   &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;

@ -1078,7 +1078,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		return -EIO;
	}

	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

@ -1134,7 +1134,7 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
			    8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */

	ring_header->used = 0;
	ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
						 &ring_header->dma_addr,
						 GFP_KERNEL);
	if (!ring_header->v_addr)

@ -400,7 +400,7 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_zalloc_coherent(dev,
	tx_ring->dma_tx = dma_alloc_coherent(dev,
					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					     &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)

@ -479,7 +479,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
	rx_ring->dma_rx = dma_alloc_coherent(priv->device,
					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					     &rx_ring->dma_rx_phy, GFP_KERNEL);

Some files were not shown because too many files have changed in this diff
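Note: every hunk in this diff is the same mechanical substitution, dma_zalloc_coherent(dev, size, &handle, gfp) becoming dma_alloc_coherent(dev, size, &handle, gfp) with continuation lines re-indented; since dma_alloc_coherent() already returns zeroed memory, no extra memset is required at the call sites. The sketch below shows the resulting allocation pattern in isolation. It is not taken from any file in this diff: struct demo_ring, demo_ring_alloc() and demo_ring_free() are invented names used purely for illustration.

/* Hypothetical example only: demo_ring and these helpers are not part of
 * this commit or of any in-tree driver.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_ring {
	void		*desc;		/* CPU address of the descriptor ring */
	dma_addr_t	desc_dma;	/* DMA address programmed into the device */
	size_t		size;		/* ring size in bytes */
};

static int demo_ring_alloc(struct device *dev, struct demo_ring *ring,
			   size_t size)
{
	/* dma_alloc_coherent() returns zeroed memory, so the former
	 * dma_zalloc_coherent() wrapper (and any follow-up memset) is
	 * redundant.
	 */
	ring->desc = dma_alloc_coherent(dev, size, &ring->desc_dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->size = size;
	return 0;
}

static void demo_ring_free(struct device *dev, struct demo_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->desc, ring->desc_dma);
}

Call sites that previously used dma_zalloc_coherent() and then wrote into the buffer need no behavioural changes; only the function name at the allocation site is different.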