Mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 16:48:26 +02:00
	net: WQ_PERCPU added to alloc_workqueue users
Currently, if a user enqueues a work item using schedule_delayed_work(), the workqueue used is "system_wq" (a per-CPU wq), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again makes use of WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

alloc_workqueue() treats all queues as per-CPU by default, while unbound workqueues must opt in via WQ_UNBOUND. This default is suboptimal: most workloads benefit from unbound queues, which allow the scheduler to place worker threads where they are needed and reduce noise when CPUs are isolated.

This change adds a new WQ_PERCPU flag to the network subsystem to explicitly request per-CPU behavior. Both flags coexist for one release cycle to allow callers to transition their calls. Once the migration is complete, WQ_UNBOUND can be removed and unbound will become the implicit default.

With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn't explicitly specify WQ_UNBOUND must now use WQ_PERCPU. All existing users have been updated accordingly.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://patch.msgid.link/20250918142427.309519-4-marco.crivellari@suse.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
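A minimal sketch of both points above, using only the workqueue API as described in this message; example_wq, example_work, and example_init() are illustrative names, not code from the tree:

#include <linux/workqueue.h>

static void example_handler(struct work_struct *work)
{
	/* work item body */
}

static DECLARE_WORK(example_work, example_handler);
static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/*
	 * The inconsistency described above, for reference:
	 *   schedule_work(&example_work);   - hard-codes system_wq (per-CPU)
	 *   queue_work(wq, &example_work);  - uses WORK_CPU_UNBOUND internally
	 */

	/*
	 * Before this patch, a flags value without WQ_UNBOUND silently
	 * selected per-CPU behavior:
	 *
	 *   example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	 *
	 * After this patch, the same behavior is requested explicitly.
	 * WQ_PERCPU is equivalent to !WQ_UNBOUND, so semantics do not
	 * change; only the intent becomes explicit.
	 */
	example_wq = alloc_workqueue("example_wq",
				     WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}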
This commit is contained in:
parent 5fd8bb982e
commit 27ce71e1ce
35 changed files with 57 additions and 42 deletions
@@ -770,7 +770,8 @@ static int hi3110_open(struct net_device *net)
 		goto out_close;
 	}
 
-	priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+	priv->wq = alloc_workqueue("hi3110_wq",
+				   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
 				   0);
 	if (!priv->wq) {
 		ret = -ENOMEM;
@@ -1378,7 +1378,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
 	if (ret)
 		goto out_clk;
 
-	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+	priv->wq = alloc_workqueue("mcp251x_wq",
+				   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
 				   0);
 	if (!priv->wq) {
 		ret = -ENOMEM;
@@ -472,7 +472,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
 		q_no = lio->linfo.rxpciq[q].s.q_no;
 		wq = &lio->rxq_status_wq[q_no];
 		wq->wq = alloc_workqueue("rxq-oom-status",
-					 WQ_MEM_RECLAIM, 0);
+					 WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 		if (!wq->wq) {
 			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
 			return -ENOMEM;
@@ -526,7 +526,8 @@ static inline int setup_link_status_change_wq(struct net_device *netdev)
 	struct octeon_device *oct = lio->oct_dev;
 
 	lio->link_status_wq.wq = alloc_workqueue("link-status",
-						 WQ_MEM_RECLAIM, 0);
+						 WQ_MEM_RECLAIM | WQ_PERCPU,
+						 0);
 	if (!lio->link_status_wq.wq) {
 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
 		return -1;
@@ -659,7 +660,8 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
 	struct octeon_device *oct = lio->oct_dev;
 
 	lio->sync_octeon_time_wq.wq =
-		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+		alloc_workqueue("update-octeon-time",
+				WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!lio->sync_octeon_time_wq.wq) {
 		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
 		return -1;
@@ -1734,7 +1736,7 @@ static inline int setup_tx_poll_fn(struct net_device *netdev)
 	struct octeon_device *oct = lio->oct_dev;
 
 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
-						WQ_MEM_RECLAIM, 0);
+						WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!lio->txq_status_wq.wq) {
 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
 		return -1;
@@ -304,7 +304,8 @@ static int setup_link_status_change_wq(struct net_device *netdev)
 	struct octeon_device *oct = lio->oct_dev;
 
 	lio->link_status_wq.wq = alloc_workqueue("link-status",
-						 WQ_MEM_RECLAIM, 0);
+						 WQ_MEM_RECLAIM | WQ_PERCPU,
+						 0);
 	if (!lio->link_status_wq.wq) {
 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
 		return -1;
@@ -132,7 +132,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	oct->fn_list.setup_iq_regs(oct, iq_no);
 
 	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
-						     WQ_MEM_RECLAIM,
+						     WQ_MEM_RECLAIM | WQ_PERCPU,
 						     0);
 	if (!oct->check_db_wq[iq_no].wq) {
 		vfree(iq->request_list);
@@ -39,7 +39,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
 	}
 	spin_lock_init(&oct->cmd_resp_wqlock);
 
-	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp",
+					      WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!oct->dma_comp_wq.wq) {
 		dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
 		return -ENOMEM;
@@ -4884,7 +4884,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
 	priv->rx_tstamp = false;
 
-	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", WQ_PERCPU, 0);
 	if (!priv->dpaa2_ptp_wq) {
 		err = -ENOMEM;
 		goto err_wq_alloc;
@@ -12912,7 +12912,8 @@ static int __init hclge_init(void)
 {
 	pr_debug("%s is initializing\n", HCLGE_NAME);
 
-	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0,
+				   HCLGE_NAME);
 	if (!hclge_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
 		return -ENOMEM;
@@ -37,7 +37,7 @@ static int __init fm10k_init_module(void)
 	pr_info("%s\n", fm10k_copyright);
 
 	/* create driver workqueue */
-	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
 					  fm10k_driver_name);
 	if (!fm10k_workqueue)
 		return -ENOMEM;
@@ -16617,7 +16617,7 @@ static int __init i40e_init_module(void)
 	 * since we need to be able to guarantee forward progress even under
 	 * memory pressure.
 	 */
-	i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+	i40e_wq = alloc_workqueue("%s", WQ_PERCPU, 0, i40e_driver_name);
 	if (!i40e_wq) {
 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
 		return -ENOMEM;
@@ -2005,7 +2005,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* init wq for processing linkup requests */
 	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
-	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", WQ_PERCPU, 0);
 	if (!cgx->cgx_cmd_workq) {
 		dev_err(dev, "alloc workqueue failed for cgx cmd");
 		err = -ENOMEM;
@@ -913,7 +913,7 @@ int rvu_mcs_init(struct rvu *rvu)
 	/* Initialize the wq for handling mcs interrupts */
 	INIT_LIST_HEAD(&rvu->mcs_intrq_head);
 	INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
-	rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+	rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", WQ_PERCPU, 0);
 	if (!rvu->mcs_intr_wq) {
 		dev_err(rvu->dev, "mcs alloc workqueue failed\n");
 		return -ENOMEM;
@@ -315,7 +315,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
 	spin_lock_init(&rvu->cgx_evq_lock);
 	INIT_LIST_HEAD(&rvu->cgx_evq_head);
 	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
-	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", WQ_PERCPU, 0);
 	if (!rvu->cgx_evh_wq) {
 		dev_err(rvu->dev, "alloc workqueue failed");
 		return -ENOMEM;
@@ -376,7 +376,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
 	spin_lock_init(&rvu->rep_evtq_lock);
 	INIT_LIST_HEAD(&rvu->rep_evtq_head);
 	INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
-	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
+	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", WQ_PERCPU, 0);
 	if (!rvu->rep_evt_wq) {
 		dev_err(rvu->dev, "REP workqueue allocation failed\n");
 		return -ENOMEM;
@@ -798,7 +798,8 @@ int cn10k_ipsec_init(struct net_device *netdev)
 	pf->ipsec.sa_size = sa_size;
 
 	INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
-	pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+	pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq",
+					     WQ_PERCPU, 0);
 	if (!pf->ipsec.sa_workq) {
 		netdev_err(pf->netdev, "SA alloc workqueue failed\n");
 		return -ENOMEM;
@@ -1500,7 +1500,7 @@ EXPORT_SYMBOL(prestera_device_unregister);
 
 static int __init prestera_module_init(void)
 {
-	prestera_wq = alloc_workqueue("prestera", 0, 0);
+	prestera_wq = alloc_workqueue("prestera", WQ_PERCPU, 0);
 	if (!prestera_wq)
 		return -ENOMEM;
 
@@ -898,7 +898,7 @@ static int prestera_pci_probe(struct pci_dev *pdev,
 
 	dev_info(fw->dev.dev, "Prestera FW is ready\n");
 
-	fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+	fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI | WQ_PERCPU, 1);
 	if (!fw->wq) {
 		err = -ENOMEM;
 		goto err_wq_alloc;
@@ -886,7 +886,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 		return 0;
 
-	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
+	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_PERCPU, 0);
 	if (!emad_wq)
 		return -ENOMEM;
 	mlxsw_core->emad_wq = emad_wq;
@@ -3381,7 +3381,7 @@ static int __init mlxsw_core_module_init(void)
 	if (err)
 		return err;
 
-	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
+	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_PERCPU, 0);
 	if (!mlxsw_wq) {
 		err = -ENOMEM;
 		goto err_alloc_workqueue;
@@ -797,7 +797,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 	pf->pdev = pdev;
 	pf->dev_info = dev_info;
 
-	pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+	pf->wq = alloc_workqueue("nfp-%s", WQ_PERCPU, 2, pci_name(pdev));
 	if (!pf->wq) {
 		err = -ENOMEM;
 		goto err_pci_priv_unset;
@@ -1214,7 +1214,8 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
 		hwfn = &cdev->hwfns[i];
 
 		hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
-					 0, 0, cdev->pdev->bus->number,
+					 WQ_PERCPU, 0,
+					 cdev->pdev->bus->number,
 					 PCI_SLOT(cdev->pdev->devfn),
 					 hwfn->abs_pf_id);
 
@@ -1142,7 +1142,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
 	if (err < 0)
 		goto err_register;
 
-	priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+	priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
 					netdev_name(ndev));
 	if (!priv->xfer_wq) {
 		err = -ENOMEM;
@@ -1364,14 +1364,15 @@ static int fjes_probe(struct platform_device *plat_dev)
 	adapter->force_reset = false;
 	adapter->open_guard = false;
 
-	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx",
+					   WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (unlikely(!adapter->txrx_wq)) {
 		err = -ENOMEM;
 		goto err_free_netdev;
 	}
 
 	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
-					      WQ_MEM_RECLAIM, 0);
+					      WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (unlikely(!adapter->control_wq)) {
 		err = -ENOMEM;
 		goto err_free_txrx_wq;
@@ -333,7 +333,8 @@ static int wg_newlink(struct net_device *dev,
 		goto err_free_peer_hashtable;
 
 	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
-			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+			WQ_CPU_INTENSIVE | WQ_FREEZABLE | WQ_PERCPU, 0,
+			dev->name);
 	if (!wg->handshake_receive_wq)
 		goto err_free_index_hashtable;
 
@@ -343,7 +344,8 @@ static int wg_newlink(struct net_device *dev,
 		goto err_destroy_handshake_receive;
 
 	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
-			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+			dev->name);
 	if (!wg->packet_crypt_wq)
 		goto err_destroy_handshake_send;
 
@@ -1085,7 +1085,8 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
 int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
 {
 	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
-						      WQ_MEM_RECLAIM, 1);
+						      WQ_MEM_RECLAIM | WQ_PERCPU,
+						      1);
 	if (!dpmaif_ctrl->bat_release_wq)
 		return -ENOMEM;
 
@@ -509,7 +509,7 @@ static int __init wwan_hwsim_init(void)
 	if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
 		return -EINVAL;
 
-	wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+	wwan_wq = alloc_workqueue("wwan_wq", WQ_PERCPU, 0);
 	if (!wwan_wq)
 		return -ENOMEM;
 
@@ -252,7 +252,8 @@ int __init ceph_msgr_init(void)
 	 * The number of active work items is limited by the number of
 	 * connections, so leave @max_active at default.
 	 */
-	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
+	ceph_msgr_wq = alloc_workqueue("ceph-msgr",
+				       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (ceph_msgr_wq)
 		return 0;
 
@@ -348,7 +348,7 @@ static struct pernet_operations diag_net_ops = {
 
 static int __init sock_diag_init(void)
 {
-	broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
+	broadcast_wq = alloc_workqueue("sock_diag_events", WQ_PERCPU, 0);
 	BUG_ON(!broadcast_wq);
 	return register_pernet_subsys(&diag_net_ops);
 }
@@ -672,7 +672,8 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
 
 int rds_ib_mr_init(void)
 {
-	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
+	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd",
+				       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!rds_ib_mr_wq)
 		return -ENOMEM;
 	return 0;
@@ -630,7 +630,7 @@ static int __init rxperf_init(void)
 
 	pr_info("Server registering\n");
 
-	rxperf_workqueue = alloc_workqueue("rxperf", 0, 0);
+	rxperf_workqueue = alloc_workqueue("rxperf", WQ_PERCPU, 0);
 	if (!rxperf_workqueue)
 		goto error_workqueue;
 
@@ -3535,15 +3535,15 @@ static int __init smc_init(void)
 
 	rc = -ENOMEM;
 
-	smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
+	smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", WQ_PERCPU, 0);
 	if (!smc_tcp_ls_wq)
 		goto out_pnet;
 
-	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
+	smc_hs_wq = alloc_workqueue("smc_hs_wq", WQ_PERCPU, 0);
 	if (!smc_hs_wq)
 		goto out_alloc_tcp_ls_wq;
 
-	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
+	smc_close_wq = alloc_workqueue("smc_close_wq", WQ_PERCPU, 0);
 	if (!smc_close_wq)
 		goto out_alloc_hs_wq;
 
@@ -896,7 +896,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
 		rc = SMC_CLC_DECL_MEM;
 		goto ism_put_vlan;
 	}
-	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
+	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", WQ_PERCPU, 0,
 				     SMC_LGR_ID_SIZE, &lgr->id);
 	if (!lgr->tx_wq) {
 		rc = -ENOMEM;
@@ -1412,7 +1412,7 @@ int __init tls_device_init(void)
 	if (!dummy_page)
 		return -ENOMEM;
 
-	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+	destruct_wq = alloc_workqueue("ktls_device_destruct", WQ_PERCPU, 0);
 	if (!destruct_wq) {
 		err = -ENOMEM;
 		goto err_free_dummy;
@@ -926,7 +926,7 @@ static int __init virtio_vsock_init(void)
 {
 	int ret;
 
-	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", WQ_PERCPU, 0);
 	if (!virtio_vsock_workqueue)
 		return -ENOMEM;
 
@@ -139,7 +139,7 @@ static int __init vsock_loopback_init(void)
 	struct vsock_loopback *vsock = &the_vsock_loopback;
 	int ret;
 
-	vsock->workqueue = alloc_workqueue("vsock-loopback", 0, 0);
+	vsock->workqueue = alloc_workqueue("vsock-loopback", WQ_PERCPU, 0);
 	if (!vsock->workqueue)
 		return -ENOMEM;
 