Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)

	block-5.19-2022-06-24
-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmK19ZkQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpsyBD/9whWHSyTpVftMJHJC64E+orbjeu2jFOg3P
 XjrwQ6caqCu+JjsnZWi7EzQv/UyJrWgjX3blPOl+1kiiM4LyPkzHlaFTJFZWNmUz
 6JFmAGWybOywUauwynIPT32Jc7ccbqGMy7QbEUWNiAxa384F57fPx2rUcEgD8nSB
 GGr+ljZdEcfdJ6BRrwBexfS07aVrt8SYwnluO6h1YHbxlO9cw+6ga8Y0z6ncBVFW
 XDqkzwH3oqUaGlHqu879rtQpedaB2zz/sUvWiy+KbBTyN4K/vE5ja3/x8OyIcfIU
 8LWwOCSOZeo1i7juXNFr7Ay1ldGsn6D5QeT0oNGDUh893JUz4kVI+iyoIqvVkLTE
 4MQohMddahlsf95sH/xuvJQ2WMXmy1ZjkaaDCws55zqIr6baRuI44jOFElKy2Peu
 0X7qWwri9Uk/zzCo6LhOKbcvLBriy89YchY9I3thSWIrSTx9QoIbgs8z2kG2qYx8
 pbfSKASUfyXcdgOIanCmS2+On7T7lDLEflCm/WvTDoc07OI+CpsjnIFv1CtB/sy2
 bwC35rCuo+EukDZiFDtsNsvbeVnCo3l0WC+rozean5j6zz64hdpC/eofwJmn2RfZ
 U9Ob0aDl4P+8Jta6Iqlfae78ZPAPcCzyvLb0k0l3ceMIY65x3Ss85JEFF7SA/VwJ
 /Dn12frm4Q==
 =3uDs
 -----END PGP SIGNATURE-----
Merge tag 'block-5.19-2022-06-24' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 - Series fixing issues with sysfs locking and name reuse (Christoph)
 - NVMe pull request via Christoph:
      - Fix the mixed up CRIMS/CRWMS constants (Joel Granados)
      - Add another broken identifier quirk (Leo Savernik)
      - Fix up a quirk because Samsung reuses PCI IDs over different
        products (Christoph Hellwig)
 - Remove old WARN_ON() that doesn't apply anymore (Li)
 - Fix for using a stale cached request value for rq-qos throttling
   mechanisms that may schedule(), like iocost (me)
 - Remove unused parameter to blk_independent_access_range() (Damien)
* tag 'block-5.19-2022-06-24' of git://git.kernel.dk/linux-block:
  block: remove WARN_ON() from bd_link_disk_holder
  nvme: move the Samsung X5 quirk entry to the core quirks
  nvme: fix the CRIMS and CRWMS definitions to match the spec
  nvme: add a bogus subsystem NQN quirk for Micron MTFDKBA2T0TFH
  block: pop cached rq before potentially blocking rq_qos_throttle()
  block: remove queue from struct blk_independent_access_range
  block: freeze the queue earlier in del_gendisk
  block: remove per-disk debugfs files in blk_unregister_queue
  block: serialize all debugfs operations using q->debugfs_mutex
  block: disable the elevator int del_gendisk
			
			
This commit is contained in:
commit a237cfd6b7

16 changed files with 91 additions and 105 deletions

block/blk-core.c
@@ -322,19 +322,6 @@ void blk_cleanup_queue(struct request_queue *q)
 		blk_mq_exit_queue(q);
 	}
 
-	/*
-	 * In theory, request pool of sched_tags belongs to request queue.
-	 * However, the current implementation requires tag_set for freeing
-	 * requests, so free the pool now.
-	 *
-	 * Queue has become frozen, there can't be any in-queue requests, so
-	 * it is safe to free requests now.
-	 */
-	mutex_lock(&q->sysfs_lock);
-	if (q->elevator)
-		blk_mq_sched_free_rqs(q);
-	mutex_unlock(&q->sysfs_lock);
-
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
block/blk-ia-ranges.c
@@ -144,7 +144,6 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
 	}
 
 	for (i = 0; i < iars->nr_ia_ranges; i++) {
-		iars->ia_range[i].queue = q;
 		ret = kobject_init_and_add(&iars->ia_range[i].kobj,
 					   &blk_ia_range_ktype, &iars->kobj,
 					   "%d", i);
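The deleted queue member was a back-pointer; with the range embedded in a kobject hierarchy, the owner can be recovered structurally instead of being stored. As a general illustration of that technique, here is a small standalone C sketch using container_of() with hypothetical stand-in types (not the kernel's actual helpers):

	#include <stddef.h>
	#include <stdio.h>

	/* container_of: recover the enclosing struct from a member pointer. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kobj { const char *name; };      /* stand-in for struct kobject */

	struct ia_range {
		struct kobj kobj;               /* embedded: no back-pointer needed */
		unsigned long long sector, nr_sectors;
	};

	static void show(struct kobj *k)
	{
		/* Instead of a stored r->queue, derive the owner from the member. */
		struct ia_range *r = container_of(k, struct ia_range, kobj);

		printf("%s: sector=%llu nr=%llu\n", k->name, r->sector, r->nr_sectors);
	}

	int main(void)
	{
		struct ia_range r = { .kobj = { "range0" }, .sector = 0, .nr_sectors = 1024 };

		show(&r.kobj);
		return 0;
	}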
block/blk-mq-debugfs.c
@@ -711,11 +711,6 @@ void blk_mq_debugfs_register(struct request_queue *q)
 	}
 }
 
-void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-	q->sched_debugfs_dir = NULL;
-}
-
 static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *ctx)
 {
@@ -746,6 +741,8 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q,
 
 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 {
+	if (!hctx->queue->debugfs_dir)
+		return;
 	debugfs_remove_recursive(hctx->debugfs_dir);
 	hctx->sched_debugfs_dir = NULL;
 	hctx->debugfs_dir = NULL;
@@ -773,6 +770,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
 {
 	struct elevator_type *e = q->elevator->type;
 
+	lockdep_assert_held(&q->debugfs_mutex);
+
 	/*
 	 * If the parent directory has not been created yet, return, we will be
 	 * called again later on and the directory/files will be created then.
@@ -790,6 +789,8 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
 
 void blk_mq_debugfs_unregister_sched(struct request_queue *q)
 {
+	lockdep_assert_held(&q->debugfs_mutex);
+
 	debugfs_remove_recursive(q->sched_debugfs_dir);
 	q->sched_debugfs_dir = NULL;
 }
@@ -811,6 +812,10 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)
 
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 {
+	lockdep_assert_held(&rqos->q->debugfs_mutex);
+
+	if (!rqos->q->debugfs_dir)
+		return;
 	debugfs_remove_recursive(rqos->debugfs_dir);
 	rqos->debugfs_dir = NULL;
 }
@@ -820,6 +825,8 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 	struct request_queue *q = rqos->q;
 	const char *dir_name = rq_qos_id_to_name(rqos->id);
 
+	lockdep_assert_held(&q->debugfs_mutex);
+
 	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
 		return;
 
@@ -833,17 +840,13 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
 }
 
-void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
-{
-	debugfs_remove_recursive(q->rqos_debugfs_dir);
-	q->rqos_debugfs_dir = NULL;
-}
-
 void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
 					struct blk_mq_hw_ctx *hctx)
 {
 	struct elevator_type *e = q->elevator->type;
 
+	lockdep_assert_held(&q->debugfs_mutex);
+
 	/*
 	 * If the parent debugfs directory has not been created yet, return;
 	 * We will be called again later on with appropriate parent debugfs
@@ -863,6 +866,10 @@ void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
 
 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
 {
+	lockdep_assert_held(&hctx->queue->debugfs_mutex);
+
+	if (!hctx->queue->debugfs_dir)
+		return;
 	debugfs_remove_recursive(hctx->sched_debugfs_dir);
 	hctx->sched_debugfs_dir = NULL;
 }
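The pattern repeated through all of these hunks: debugfs register and unregister paths now assume the caller holds q->debugfs_mutex (asserted via lockdep), and teardown paths return early once the parent directory is gone, so a late caller cannot recreate state mid-removal. A minimal userspace model of that convention, with pthreads standing in for kernel mutexes and heap objects for dentries (all names hypothetical):

	#include <pthread.h>
	#include <stdlib.h>

	struct dir { int dummy; };                 /* stand-in for struct dentry */

	struct queue {
		pthread_mutex_t debugfs_mutex;     /* serializes all metadata ops */
		struct dir *debugfs_dir;           /* parent directory */
		struct dir *sched_debugfs_dir;     /* child, valid only with parent */
	};

	/* Caller must hold q->debugfs_mutex (kernel: lockdep_assert_held()). */
	static void unregister_sched(struct queue *q)
	{
		if (!q->debugfs_dir)               /* parent already torn down */
			return;
		free(q->sched_debugfs_dir);
		q->sched_debugfs_dir = NULL;
	}

	int main(void)
	{
		struct queue q = { .debugfs_mutex = PTHREAD_MUTEX_INITIALIZER };

		q.debugfs_dir = malloc(sizeof(struct dir));
		q.sched_debugfs_dir = malloc(sizeof(struct dir));

		pthread_mutex_lock(&q.debugfs_mutex);
		unregister_sched(&q);              /* runs under the lock, as required */
		pthread_mutex_unlock(&q.debugfs_mutex);

		free(q.debugfs_dir);
		return 0;
	}

The same shape appears below in blk-mq-sched.c and blk-rq-qos.h, where each caller takes the mutex around the register or unregister call.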
block/blk-mq-debugfs.h
@@ -21,7 +21,6 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
 int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
 
 void blk_mq_debugfs_register(struct request_queue *q);
-void blk_mq_debugfs_unregister(struct request_queue *q);
 void blk_mq_debugfs_register_hctx(struct request_queue *q,
 				  struct blk_mq_hw_ctx *hctx);
 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
@@ -36,16 +35,11 @@ void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
-void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
 #else
 static inline void blk_mq_debugfs_register(struct request_queue *q)
 {
 }
 
-static inline void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-}
-
 static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
 						struct blk_mq_hw_ctx *hctx)
 {
@@ -87,10 +81,6 @@ static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 {
 }
-
-static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
-{
-}
 #endif
 
 #ifdef CONFIG_BLK_DEBUG_FS_ZONED
block/blk-mq-sched.c
@@ -594,7 +594,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	if (ret)
 		goto err_free_map_and_rqs;
 
+	mutex_lock(&q->debugfs_mutex);
 	blk_mq_debugfs_register_sched(q);
+	mutex_unlock(&q->debugfs_mutex);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (e->ops.init_hctx) {
@@ -607,7 +609,9 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				return ret;
 			}
 		}
+		mutex_lock(&q->debugfs_mutex);
 		blk_mq_debugfs_register_sched_hctx(q, hctx);
+		mutex_unlock(&q->debugfs_mutex);
 	}
 
 	return 0;
@@ -648,14 +652,21 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	unsigned int flags = 0;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
+		mutex_lock(&q->debugfs_mutex);
 		blk_mq_debugfs_unregister_sched_hctx(hctx);
+		mutex_unlock(&q->debugfs_mutex);
+
 		if (e->type->ops.exit_hctx && hctx->sched_data) {
 			e->type->ops.exit_hctx(hctx, i);
 			hctx->sched_data = NULL;
 		}
 		flags = hctx->flags;
 	}
+
+	mutex_lock(&q->debugfs_mutex);
 	blk_mq_debugfs_unregister_sched(q);
+	mutex_unlock(&q->debugfs_mutex);
+
 	if (e->type->ops.exit_sched)
 		e->type->ops.exit_sched(e);
 	blk_mq_sched_tags_teardown(q, flags);
block/blk-mq.c
@@ -2765,15 +2765,20 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		return NULL;
 	}
 
-	rq_qos_throttle(q, *bio);
-
 	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
 		return NULL;
 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;
 
-	rq->cmd_flags = (*bio)->bi_opf;
+	/*
+	 * If any qos ->throttle() end up blocking, we will have flushed the
+	 * plug and hence killed the cached_rq list as well. Pop this entry
+	 * before we throttle.
+	 */
 	plug->cached_rq = rq_list_next(rq);
+	rq_qos_throttle(q, *bio);
 
+	rq->cmd_flags = (*bio)->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
 	return rq;
 }
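This fix is an instance of a general rule: detach an element from a shared cache before calling anything that may block, when blocking can flush and invalidate that cache behind you. A standalone sketch of the hazard and the fix (plain C, hypothetical names; the stand-in throttle function plays the role of the possibly-blocking rq_qos ->throttle()):

	#include <stdio.h>

	struct req  { struct req *next; int id; };
	struct plug { struct req *cached; };

	/* Models rq_qos_throttle(): it may block, and blocking flushes the
	 * plug, which invalidates everything still hanging off plug->cached. */
	static void maybe_blocking_throttle(struct plug *p)
	{
		p->cached = NULL;              /* cache killed while we "slept" */
	}

	static struct req *get_cached(struct plug *p)
	{
		struct req *rq = p->cached;

		if (!rq)
			return NULL;
		p->cached = rq->next;          /* pop FIRST... */
		maybe_blocking_throttle(p);    /* ...then risk blocking: rq is ours */
		return rq;
	}

	int main(void)
	{
		struct req r = { .next = NULL, .id = 1 };
		struct plug p = { .cached = &r };
		struct req *rq = get_cached(&p);

		printf("got request %d\n", rq ? rq->id : -1);
		return 0;
	}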
block/blk-rq-qos.c
@@ -294,8 +294,6 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
 
 void rq_qos_exit(struct request_queue *q)
 {
-	blk_mq_debugfs_unregister_queue_rqos(q);
-
 	while (q->rq_qos) {
 		struct rq_qos *rqos = q->rq_qos;
 		q->rq_qos = rqos->next;
block/blk-rq-qos.h
@@ -104,8 +104,11 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
 	blk_mq_unfreeze_queue(q);
 
-	if (rqos->ops->debugfs_attrs)
+	if (rqos->ops->debugfs_attrs) {
+		mutex_lock(&q->debugfs_mutex);
 		blk_mq_debugfs_register_rqos(rqos);
+		mutex_unlock(&q->debugfs_mutex);
+	}
 }
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
@@ -129,7 +132,9 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 
 	blk_mq_unfreeze_queue(q);
 
+	mutex_lock(&q->debugfs_mutex);
 	blk_mq_debugfs_unregister_rqos(rqos);
+	mutex_unlock(&q->debugfs_mutex);
 }
 
 typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
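Note the scoping: rq_qos_add() only touches debugfs when the policy actually supplies attributes, and the new lock covers just that window. A compact sketch of the shape (hypothetical types, pthreads standing in for the kernel mutex):

	#include <pthread.h>
	#include <stdio.h>

	struct qos_ops { const char *const *debugfs_attrs; };  /* optional files */

	struct queue { pthread_mutex_t debugfs_mutex; };
	struct rqos  { const struct qos_ops *ops; };

	static void rqos_register_debugfs(struct queue *q, struct rqos *rqos)
	{
		/* Policies without attributes never take the metadata lock. */
		if (!rqos->ops->debugfs_attrs)
			return;

		pthread_mutex_lock(&q->debugfs_mutex);
		puts("creating qos debugfs entries");  /* kernel: blk_mq_debugfs_register_rqos() */
		pthread_mutex_unlock(&q->debugfs_mutex);
	}

	int main(void)
	{
		static const char *const attrs[] = { "inflight", NULL };
		struct queue q = { .debugfs_mutex = PTHREAD_MUTEX_INITIALIZER };
		struct qos_ops ops = { .debugfs_attrs = attrs };
		struct rqos rqos = { .ops = &ops };

		rqos_register_debugfs(&q, &rqos);
		return 0;
	}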
block/blk-sysfs.c
@@ -779,14 +779,6 @@ static void blk_release_queue(struct kobject *kobj)
 	if (queue_is_mq(q))
 		blk_mq_release(q);
 
-	blk_trace_shutdown(q);
-	mutex_lock(&q->debugfs_mutex);
-	debugfs_remove_recursive(q->debugfs_dir);
-	mutex_unlock(&q->debugfs_mutex);
-
-	if (queue_is_mq(q))
-		blk_mq_debugfs_unregister(q);
-
 	bioset_exit(&q->bio_split);
 
 	if (blk_queue_has_srcu(q))
@@ -836,17 +828,16 @@ int blk_register_queue(struct gendisk *disk)
 		goto unlock;
 	}
 
-	if (queue_is_mq(q))
-		__blk_mq_register_dev(dev, q);
-	mutex_lock(&q->sysfs_lock);
-
 	mutex_lock(&q->debugfs_mutex);
 	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
 					    blk_debugfs_root);
-	mutex_unlock(&q->debugfs_mutex);
-
-	if (queue_is_mq(q))
+	if (queue_is_mq(q)) {
+		__blk_mq_register_dev(dev, q);
 		blk_mq_debugfs_register(q);
+	}
+	mutex_unlock(&q->debugfs_mutex);
+
+	mutex_lock(&q->sysfs_lock);
 
 	ret = disk_register_independent_access_ranges(disk, NULL);
 	if (ret)
@@ -948,8 +939,15 @@ void blk_unregister_queue(struct gendisk *disk)
 	/* Now that we've deleted all child objects, we can delete the queue. */
 	kobject_uevent(&q->kobj, KOBJ_REMOVE);
 	kobject_del(&q->kobj);
-
 	mutex_unlock(&q->sysfs_dir_lock);
 
+	mutex_lock(&q->debugfs_mutex);
+	blk_trace_shutdown(q);
+	debugfs_remove_recursive(q->debugfs_dir);
+	q->debugfs_dir = NULL;
+	q->sched_debugfs_dir = NULL;
+	q->rqos_debugfs_dir = NULL;
+	mutex_unlock(&q->debugfs_mutex);
+
 	kobject_put(&disk_to_dev(disk)->kobj);
 }
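The net effect of these hunks: the debugfs directory and blktrace state are now torn down in blk_unregister_queue() rather than in the final kobject release, so their lifetime is bounded by register/unregister instead of by the last reference. A toy model of that pairing (plain C, hypothetical names):

	#include <stdlib.h>

	struct queue {
		void *debugfs_dir;        /* registration-scoped, not refcount-scoped */
	};

	static void q_register(struct queue *q)
	{
		q->debugfs_dir = malloc(64);      /* created when the disk registers */
	}

	static void q_unregister(struct queue *q)
	{
		free(q->debugfs_dir);             /* destroyed at unregistration... */
		q->debugfs_dir = NULL;
	}

	static void q_release(struct queue *q)
	{
		/* ...so the final-release path no longer touches debugfs at all. */
		free(q);
	}

	int main(void)
	{
		struct queue *q = calloc(1, sizeof(*q));

		q_register(q);
		q_unregister(q);
		q_release(q);
		return 0;
	}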
block/genhd.c
@@ -623,6 +623,7 @@ void del_gendisk(struct gendisk *disk)
 	 * Prevent new I/O from crossing bio_queue_enter().
 	 */
 	blk_queue_start_drain(q);
+	blk_mq_freeze_queue_wait(q);
 
 	if (!(disk->flags & GENHD_FL_HIDDEN)) {
 		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -646,12 +647,21 @@ void del_gendisk(struct gendisk *disk)
 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 	device_del(disk_to_dev(disk));
 
-	blk_mq_freeze_queue_wait(q);
-
 	blk_throtl_cancel_bios(disk->queue);
 
 	blk_sync_queue(q);
 	blk_flush_integrity();
+	blk_mq_cancel_work_sync(q);
+
+	blk_mq_quiesce_queue(q);
+	if (q->elevator) {
+		mutex_lock(&q->sysfs_lock);
+		elevator_exit(q);
+		mutex_unlock(&q->sysfs_lock);
+	}
+	rq_qos_exit(q);
+	blk_mq_unquiesce_queue(q);
+
 	/*
 	 * Allow using passthrough request again after the queue is torn down.
 	 */
@@ -1120,31 +1130,6 @@ static const struct attribute_group *disk_attr_groups[] = {
 	NULL
 };
 
-static void disk_release_mq(struct request_queue *q)
-{
-	blk_mq_cancel_work_sync(q);
-
-	/*
-	 * There can't be any non non-passthrough bios in flight here, but
-	 * requests stay around longer, including passthrough ones so we
-	 * still need to freeze the queue here.
-	 */
-	blk_mq_freeze_queue(q);
-
-	/*
-	 * Since the I/O scheduler exit code may access cgroup information,
-	 * perform I/O scheduler exit before disassociating from the block
-	 * cgroup controller.
-	 */
-	if (q->elevator) {
-		mutex_lock(&q->sysfs_lock);
-		elevator_exit(q);
-		mutex_unlock(&q->sysfs_lock);
-	}
-	rq_qos_exit(q);
-	__blk_mq_unfreeze_queue(q, true);
-}
-
 /**
  * disk_release - releases all allocated resources of the gendisk
  * @dev: the device representing this disk
@@ -1166,9 +1151,6 @@ static void disk_release(struct device *dev)
 	might_sleep();
 	WARN_ON_ONCE(disk_live(disk));
 
-	if (queue_is_mq(disk->queue))
-		disk_release_mq(disk->queue);
-
 	blkcg_exit_queue(disk->queue);
 
 	disk_release_events(disk);
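Read together, the del_gendisk() hunks pin down a teardown order: drain and freeze first, quiesce before touching the scheduler, tear down the elevator and rq-qos policies while quiesced, then unquiesce so passthrough requests work again. A stubbed-out skeleton of that sequence (every helper reduced to a comment; hypothetical signatures, not the kernel functions):

	#include <stdio.h>

	struct queue { int has_elevator; };

	static void start_drain(struct queue *q)   { (void)q; /* fail new bios */ }
	static void freeze_wait(struct queue *q)   { (void)q; /* wait out in-flight I/O */ }
	static void quiesce(struct queue *q)       { (void)q; /* stop dispatch */ }
	static void exit_elevator(struct queue *q) { (void)q; /* free scheduler state */ }
	static void exit_rq_qos(struct queue *q)   { (void)q; /* unwind qos policies */ }
	static void unquiesce(struct queue *q)     { (void)q; /* dispatch again */ }

	static void teardown(struct queue *q)
	{
		start_drain(q);           /* 1: no new I/O crosses bio_queue_enter() */
		freeze_wait(q);           /* 2: moved earlier, queue stays empty from here */
		quiesce(q);               /* 3: nothing dispatches while we... */
		if (q->has_elevator)
			exit_elevator(q); /* 4: ...tear down the scheduler (under sysfs_lock) */
		exit_rq_qos(q);           /* 5: then the rq-qos policies */
		unquiesce(q);             /* 6: passthrough requests work again */
	}

	int main(void)
	{
		struct queue q = { .has_elevator = 1 };

		teardown(&q);
		puts("torn down");
		return 0;
	}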
block/holder.c
@@ -79,10 +79,6 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 
 	WARN_ON_ONCE(!bdev->bd_holder);
 
-	/* FIXME: remove the following once add_disk() handles errors */
-	if (WARN_ON(!bdev->bd_holder_dir))
-		goto out_unlock;
-
 	holder = bd_find_holder_disk(bdev, disk);
 	if (holder) {
 		holder->refcnt++;
drivers/nvme/host/core.c
@@ -2546,6 +2546,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
 		.vid = 0x1e0f,
 		.mn = "KCD6XVUL6T40",
 		.quirks = NVME_QUIRK_NO_APST,
 	},
+	{
+		/*
+		 * The external Samsung X5 SSD fails initialization without a
+		 * delay before checking if it is ready and has a whole set of
+		 * other problems.  To make this even more interesting, it
+		 * shares the PCI ID with internal Samsung 970 Evo Plus that
+		 * does not need or want these quirks.
+		 */
+		.vid = 0x144d,
+		.mn = "Samsung Portable SSD X5",
+		.quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+			  NVME_QUIRK_NO_DEEPEST_PS |
+			  NVME_QUIRK_IGNORE_DEV_SUBNQN,
+	}
 };
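Why the move matters: entries in the PCI table below match on vendor/device ID, while core quirks match on what the controller reports in its Identify data (vendor ID plus model string). Since the X5 and the 970 Evo Plus share PCI ID 144d:a808 but report different model names, only the core table can tell them apart. A simplified, hypothetical matcher in that spirit (the quirk flag values and the Evo Plus model string here are made up, and the real driver also trims padded spaces):

	#include <stdio.h>
	#include <string.h>

	struct quirk_entry {
		unsigned short vid;
		const char *mn;         /* model name from Identify Controller data */
		unsigned long quirks;   /* flag bits; values below are made up */
	};

	static const struct quirk_entry core_quirks[] = {
		{ 0x144d, "Samsung Portable SSD X5", 0x1 | 0x2 | 0x4 },
	};

	/* Match on what the controller reports, not on the PCI ID it
	 * enumerates with: two products sharing a PCI ID still differ here. */
	static unsigned long lookup_quirks(unsigned short vid, const char *mn)
	{
		for (unsigned i = 0; i < sizeof(core_quirks) / sizeof(core_quirks[0]); i++)
			if (core_quirks[i].vid == vid && strcmp(core_quirks[i].mn, mn) == 0)
				return core_quirks[i].quirks;
		return 0;
	}

	int main(void)
	{
		/* Second model string is a hypothetical 970 Evo Plus report. */
		printf("X5:       %#lx\n", lookup_quirks(0x144d, "Samsung Portable SSD X5"));
		printf("Evo Plus: %#lx\n", lookup_quirks(0x144d, "Samsung SSD 970 EVO Plus 1TB"));
		return 0;
	}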
drivers/nvme/host/pci.c
@@ -3474,6 +3474,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	 { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
 	{ PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x1c5c, 0x174a),   /* SK Hynix P31 SSD */
@@ -3524,10 +3526,6 @@ static const struct pci_device_id nvme_id_table[] = {
 				NVME_QUIRK_128_BYTES_SQES |
 				NVME_QUIRK_SHARED_TAGS |
 				NVME_QUIRK_SKIP_CID_GEN },
-	{ PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
-		.driver_data =  NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
-				NVME_QUIRK_NO_DEEPEST_PS |
-				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
include/linux/blkdev.h
@@ -342,7 +342,6 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
  */
 struct blk_independent_access_range {
 	struct kobject		kobj;
-	struct request_queue	*queue;
 	sector_t		sector;
 	sector_t		nr_sectors;
 };
@@ -482,7 +481,6 @@ struct request_queue {
 #endif /* CONFIG_BLK_DEV_ZONED */
 
 	int			node;
-	struct mutex		debugfs_mutex;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	struct blk_trace __rcu	*blk_trace;
 #endif
@@ -526,11 +524,12 @@ struct request_queue {
 	struct bio_set		bio_split;
 
 	struct dentry		*debugfs_dir;
-
-#ifdef CONFIG_BLK_DEBUG_FS
 	struct dentry		*sched_debugfs_dir;
 	struct dentry		*rqos_debugfs_dir;
-#endif
+	/*
+	 * Serializes all debugfs metadata operations using the above dentries.
+	 */
+	struct mutex		debugfs_mutex;
 
 	bool			mq_sysfs_init_done;
 
include/linux/nvme.h
@@ -233,8 +233,8 @@ enum {
 };
 
 enum {
-	NVME_CAP_CRMS_CRIMS	= 1ULL << 59,
-	NVME_CAP_CRMS_CRWMS	= 1ULL << 60,
+	NVME_CAP_CRMS_CRWMS	= 1ULL << 59,
+	NVME_CAP_CRMS_CRIMS	= 1ULL << 60,
 };
 
 struct nvme_id_power_state {
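The two constants were simply transposed relative to the NVMe base specification, where the CAP.CRMS field occupies bits 60:59 with CRWMS (Controller Ready With Media Support) at bit 59 and CRIMS (Controller Ready Independent of Media Support) at bit 60. A self-contained check of the corrected values:

	#include <stdint.h>
	#include <stdio.h>

	/* Corrected values, matching the hunk above: CRWMS at bit 59,
	 * CRIMS at bit 60. */
	#define NVME_CAP_CRMS_CRWMS (1ULL << 59)
	#define NVME_CAP_CRMS_CRIMS (1ULL << 60)

	int main(void)
	{
		uint64_t cap = NVME_CAP_CRMS_CRWMS;   /* a sample CAP register value */

		printf("CRWMS supported: %s\n", (cap & NVME_CAP_CRMS_CRWMS) ? "yes" : "no");
		printf("CRIMS supported: %s\n", (cap & NVME_CAP_CRMS_CRIMS) ? "yes" : "no");
		return 0;
	}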
kernel/trace/blktrace.c
@@ -770,14 +770,11 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  **/
 void blk_trace_shutdown(struct request_queue *q)
 {
-	mutex_lock(&q->debugfs_mutex);
 	if (rcu_dereference_protected(q->blk_trace,
 				      lockdep_is_held(&q->debugfs_mutex))) {
 		__blk_trace_startstop(q, 0);
 		__blk_trace_remove(q);
 	}
-
-	mutex_unlock(&q->debugfs_mutex);
 }
 
 #ifdef CONFIG_BLK_CGROUP
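blk_trace_shutdown() drops its internal locking because its caller, blk_unregister_queue() above, now holds q->debugfs_mutex across the call; the existing lockdep_is_held() annotation documents that contract. The general shape, modeled in userspace (pthreads, hypothetical names):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t debugfs_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int trace_active = 1;

	/* Caller holds debugfs_mutex; the kernel records this contract with
	 * lockdep_is_held(&q->debugfs_mutex) rather than a runtime check. */
	static void trace_shutdown_locked(void)
	{
		if (trace_active) {
			trace_active = 0;
			puts("trace stopped and removed");
		}
	}

	static void unregister_queue(void)
	{
		pthread_mutex_lock(&debugfs_mutex);
		trace_shutdown_locked();   /* one lock scope covers trace + dentries */
		pthread_mutex_unlock(&debugfs_mutex);
	}

	int main(void)
	{
		unregister_queue();
		return 0;
	}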