Mirror of https://github.com/torvalds/linux.git, synced 2025-11-01 00:58:39 +02:00
	block: remove BLK_MQ_F_SHOULD_MERGE
BLK_MQ_F_SHOULD_MERGE is set for all tag_sets except those that purely
process passthrough commands (bsg-lib, ufs tmf, various nvme admin
queues) and thus don't even check the flag. Remove it to simplify the
driver interface.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241219060214.1928848-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9bc1e897a8
commit cc76ace465

34 changed files with 15 additions and 43 deletions
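For driver code adapting to this change, the only adjustment is to stop setting the flag when filling in a struct blk_mq_tag_set; merging against the software queues is now attempted unconditionally in blk_mq_sched_bio_merge(). A minimal sketch of a tag set initialization after this commit follows (my_blk_dev, my_mq_ops and my_blk_init_tag_set are hypothetical names used for illustration, not part of the patch):

#include <linux/blk-mq.h>

/* Hypothetical driver state; only the tag set setup is of interest here. */
struct my_blk_dev {
	struct blk_mq_tag_set tag_set;
};

/* A real driver must provide at least .queue_rq; elided in this sketch. */
static const struct blk_mq_ops my_mq_ops;

static int my_blk_init_tag_set(struct my_blk_dev *dev)
{
	dev->tag_set.ops = &my_mq_ops;
	dev->tag_set.nr_hw_queues = 1;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.driver_data = dev;
	/*
	 * Before this commit: dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	 * The flag no longer exists, so only behavioural flags remain,
	 * e.g. BLK_MQ_F_BLOCKING for drivers that may sleep in ->queue_rq().
	 */
	dev->tag_set.flags = 0;
	return blk_mq_alloc_tag_set(&dev->tag_set);
}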
				
			
@@ -865,7 +865,6 @@ static int ubd_add(int n, char **error_out)
 	ubd_dev->tag_set.ops = &ubd_mq_ops;
 	ubd_dev->tag_set.queue_depth = 64;
 	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
-	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ubd_dev->tag_set.driver_data = ubd_dev;
 	ubd_dev->tag_set.nr_hw_queues = 1;
 
@@ -181,7 +181,6 @@ static const char *const alloc_policy_name[] = {
 
 #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
 static const char *const hctx_flag_name[] = {
-	HCTX_FLAG_NAME(SHOULD_MERGE),
 	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
 	HCTX_FLAG_NAME(STACKING),
 	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
@@ -351,8 +351,7 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	type = hctx->type;
-	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
-	    list_empty_careful(&ctx->rq_lists[type]))
+	if (list_empty_careful(&ctx->rq_lists[type]))
 		goto out_put;
 
 	/* default per sw-queue merge */
@@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
 	unit[drive].tag_set.nr_maps = 1;
 	unit[drive].tag_set.queue_depth = 2;
 	unit[drive].tag_set.numa_node = NUMA_NO_NODE;
-	unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
 		goto out_cleanup_trackbuf;
 
@@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
 	set->nr_hw_queues = 1;
 	set->queue_depth = 128;
 	set->numa_node = NUMA_NO_NODE;
-	set->flags = BLK_MQ_F_SHOULD_MERGE;
 	err = blk_mq_alloc_tag_set(set);
 	if (err) {
 		pr_err("aoe: cannot allocate tag set for %ld.%d\n",
@@ -2088,7 +2088,6 @@ static int __init atari_floppy_init (void)
 		unit[i].tag_set.nr_maps = 1;
 		unit[i].tag_set.queue_depth = 2;
 		unit[i].tag_set.numa_node = NUMA_NO_NODE;
-		unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 		ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
 		if (ret)
 			goto err;
@@ -4596,7 +4596,6 @@ static int __init do_floppy_init(void)
 		tag_sets[drive].nr_maps = 1;
 		tag_sets[drive].queue_depth = 2;
 		tag_sets[drive].numa_node = NUMA_NO_NODE;
-		tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
 		err = blk_mq_alloc_tag_set(&tag_sets[drive]);
 		if (err)
 			goto out_put_disk;
@@ -2023,8 +2023,7 @@ static int loop_add(int i)
 	lo->tag_set.queue_depth = hw_queue_depth;
 	lo->tag_set.numa_node = NUMA_NO_NODE;
 	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
-	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
-		BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+	lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
 	lo->tag_set.driver_data = lo;
 
 	err = blk_mq_alloc_tag_set(&lo->tag_set);
@@ -3416,7 +3416,6 @@ static int mtip_block_initialize(struct driver_data *dd)
 	dd->tags.reserved_tags = 1;
 	dd->tags.cmd_size = sizeof(struct mtip_cmd);
 	dd->tags.numa_node = dd->numa_node;
-	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
 	dd->tags.driver_data = dd;
 	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
 
@@ -1841,8 +1841,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
 	nbd->tag_set.queue_depth = 128;
 	nbd->tag_set.numa_node = NUMA_NO_NODE;
 	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
-	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
-		BLK_MQ_F_BLOCKING;
+	nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
 	nbd->tag_set.driver_data = nbd;
 	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
 	nbd->backend = NULL;
@@ -1791,7 +1791,6 @@ static int null_init_global_tag_set(void)
 	tag_set.nr_hw_queues = g_submit_queues;
 	tag_set.queue_depth = g_hw_queue_depth;
 	tag_set.numa_node = g_home_node;
-	tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	if (g_no_sched)
 		tag_set.flags |= BLK_MQ_F_NO_SCHED;
 	if (g_shared_tag_bitmap)
@@ -1817,7 +1816,6 @@ static int null_setup_tagset(struct nullb *nullb)
 	nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
 	nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
 	nullb->tag_set->numa_node = nullb->dev->home_node;
-	nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
 	if (nullb->dev->no_sched)
 		nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
 	if (nullb->dev->shared_tag_bitmap)
@@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
 
 	ps3disk_identify(dev);
 
-	error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
-					BLK_MQ_F_SHOULD_MERGE);
+	error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
 	if (error)
 		goto fail_teardown;
 
@@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	rbd_dev->tag_set.ops = &rbd_mq_ops;
 	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
 	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
-	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
 	rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
 
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
 	tag_set->ops		= &rnbd_mq_ops;
 	tag_set->queue_depth	= sess->queue_depth;
 	tag_set->numa_node		= NUMA_NO_NODE;
-	tag_set->flags		= BLK_MQ_F_SHOULD_MERGE |
-				  BLK_MQ_F_TAG_QUEUE_SHARED;
+	tag_set->flags		= BLK_MQ_F_TAG_QUEUE_SHARED;
 	tag_set->cmd_size	= sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
 
 	/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
@@ -829,7 +829,7 @@ static int probe_disk(struct vdc_port *port)
 	}
 
 	err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
-			VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+			VDC_TX_RING_SIZE, 0);
 	if (err)
 		return err;
 
@@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
 
 	for (drive = 0; drive < swd->floppy_count; drive++) {
 		err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
-				&swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+				&swim_mq_ops, 2, 0);
 		if (err)
 			goto exit_put_disks;
 
@@ -1208,8 +1208,7 @@ static int swim3_attach(struct macio_dev *mdev,
 	fs = &floppy_states[floppy_count];
 	memset(fs, 0, sizeof(*fs));
 
-	rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
-			BLK_MQ_F_SHOULD_MERGE);
+	rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
 	if (rc)
 		goto out_unregister;
 
@@ -2205,7 +2205,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
 	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
 	ub->tag_set.numa_node = NUMA_NO_NODE;
 	ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
-	ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ub->tag_set.driver_data = ub;
 	return blk_mq_alloc_tag_set(&ub->tag_set);
 }
@@ -1481,7 +1481,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	vblk->tag_set.ops = &virtio_mq_ops;
 	vblk->tag_set.queue_depth = queue_depth;
 	vblk->tag_set.numa_node = NUMA_NO_NODE;
-	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	vblk->tag_set.cmd_size =
 		sizeof(struct virtblk_req) +
 		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
@@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	} else
 		info->tag_set.queue_depth = BLK_RING_SIZE(info);
 	info->tag_set.numa_node = NUMA_NO_NODE;
-	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	info->tag_set.cmd_size = sizeof(struct blkif_req);
 	info->tag_set.driver_data = info;
 
@@ -354,7 +354,6 @@ static int __init z2_init(void)
 	tag_set.nr_maps = 1;
 	tag_set.queue_depth = 16;
 	tag_set.numa_node = NUMA_NO_NODE;
-	tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ret = blk_mq_alloc_tag_set(&tag_set);
 	if (ret)
 		goto out_unregister_blkdev;
@@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
 	probe_gdrom_setupcd();
 
 	err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
-				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+				BLK_MQ_F_BLOCKING);
 	if (err)
 		goto probe_fail_free_cd_info;
 
@@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	md->tag_set->ops = &dm_mq_ops;
 	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
 	md->tag_set->numa_node = md->numa_node_id;
-	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+	md->tag_set->flags = BLK_MQ_F_STACKING;
 	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
 	md->tag_set->driver_data = md;
 
@@ -2094,8 +2094,7 @@ static int msb_init_disk(struct memstick_dev *card)
 	if (msb->disk_id  < 0)
 		return msb->disk_id;
 
-	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
-				     BLK_MQ_F_SHOULD_MERGE);
+	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0);
 	if (rc)
 		goto out_release_id;
 
@@ -1139,8 +1139,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 	if (disk_id < 0)
 		return disk_id;
 
-	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
-				     BLK_MQ_F_SHOULD_MERGE);
+	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0);
 	if (rc)
 		goto out_release_id;
 
@@ -441,7 +441,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	else
 		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
 	mq->tag_set.numa_node = NUMA_NO_NODE;
-	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+	mq->tag_set.flags = BLK_MQ_F_BLOCKING;
 	mq->tag_set.nr_hw_queues = 1;
 	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
 	mq->tag_set.driver_data = mq;
@@ -329,7 +329,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 		goto out_list_del;
 
 	ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
-			BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+			BLK_MQ_F_BLOCKING);
 	if (ret)
 		goto out_kfree_tag_set;
 
@@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	dev->tag_set.ops = &ubiblock_mq_ops;
 	dev->tag_set.queue_depth = 64;
 	dev->tag_set.numa_node = NUMA_NO_NODE;
-	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+	dev->tag_set.flags = BLK_MQ_F_BLOCKING;
 	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
 	dev->tag_set.driver_data = dev;
 	dev->tag_set.nr_hw_queues = 1;
@@ -1275,7 +1275,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
 	anv->tagset.timeout = NVME_IO_TIMEOUT;
 	anv->tagset.numa_node = NUMA_NO_NODE;
 	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
-	anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
 	anv->tagset.driver_data = &anv->ioq;
 
 	ret = blk_mq_alloc_tag_set(&anv->tagset);
@@ -4639,7 +4639,6 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 		/* Reserved for fabric connect */
 		set->reserved_tags = 1;
 	set->numa_node = ctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE;
 	if (ctrl->ops->flags & NVME_F_BLOCKING)
 		set->flags |= BLK_MQ_F_BLOCKING;
 	set->cmd_size = cmd_size;
@@ -56,7 +56,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
 	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
 	block->tag_set.nr_hw_queues = nr_hw_queues;
 	block->tag_set.queue_depth = queue_depth;
-	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	block->tag_set.numa_node = NUMA_NO_NODE;
 	rc = blk_mq_alloc_tag_set(&block->tag_set);
 	if (rc)
@@ -461,7 +461,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	bdev->tag_set.cmd_size = sizeof(blk_status_t);
 	bdev->tag_set.nr_hw_queues = nr_requests;
 	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
-	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	bdev->tag_set.numa_node = NUMA_NO_NODE;
 
 	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
@@ -2065,7 +2065,6 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 	tag_set->queue_depth = shost->can_queue;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = dev_to_node(shost->dma_dev);
-	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
 	tag_set->flags |=
 		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
 	if (shost->queuecommand_may_block)
@@ -668,7 +668,6 @@ struct blk_mq_ops {
 
 /* Keep hctx_flag_name[] in sync with the definitions below */
 enum {
-	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
 	/*
 	 * Set when this device requires underlying blk-mq device for