	sbitmap: Delete old sbitmap_queue_get_shallow()
Since sbitmap_queue_get_shallow() was introduced in commit c05e667337
("sbitmap: add sbitmap_get_shallow() operation"), it has not been used;
blk-mq calls __sbitmap_queue_get_shallow() directly.

Delete the unused sbitmap_queue_get_shallow() wrapper and rename the public
__sbitmap_queue_get_shallow() -> sbitmap_queue_get_shallow(), as it is odd
to have a public __foo but no foo at all.
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1644322024-105340-1-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
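
For illustration only, a minimal hypothetical caller of the renamed helper;
the sbitmap_queue_* call is the real API, while the example_* name and its
arguments are made up for this sketch:

#include <linux/sbitmap.h>

/*
 * Hypothetical wrapper: the only change a caller sees from this commit is
 * the dropped "__" prefix; arguments and return value are unchanged.
 */
static int example_get_shallow_tag(struct sbitmap_queue *bt,
				   unsigned int shallow_depth)
{
	/* was: return __sbitmap_queue_get_shallow(bt, shallow_depth); */
	return sbitmap_queue_get_shallow(bt, shallow_depth);
}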
			
			
commit 3f607293b7
parent 3301bc5335

3 changed files with 8 additions and 34 deletions
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -107,7 +107,7 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 		return BLK_MQ_NO_TAG;
 
 	if (data->shallow_depth)
-		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
+		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
 	else
 		return __sbitmap_queue_get(bt);
 }
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -135,7 +135,7 @@ struct sbitmap_queue {
 
 	/**
 	 * @min_shallow_depth: The minimum shallow depth which may be passed to
-	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
+	 * sbitmap_queue_get_shallow()
 	 */
 	unsigned int min_shallow_depth;
 };
@@ -463,7 +463,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 					unsigned int *offset);
 
 /**
- * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
  * sbitmap_queue, limiting the depth used from each word, with preemption
  * already disabled.
  * @sbq: Bitmap queue to allocate from.
@@ -475,7 +475,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
  *
  * Return: Non-negative allocated bit number if successful, -1 otherwise.
  */
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
 			      unsigned int shallow_depth);
 
 /**
@@ -498,32 +498,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
 	return nr;
 }
 
-/**
- * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
- * sbitmap_queue, limiting the depth used from each word.
- * @sbq: Bitmap queue to allocate from.
- * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
- *       sbitmap_queue_clear()).
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- * See sbitmap_get_shallow().
- *
- * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
- * initializing @sbq.
- *
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
- */
-static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
-					    unsigned int *cpu,
-					    unsigned int shallow_depth)
-{
-	int nr;
-
-	*cpu = get_cpu();
-	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
-	put_cpu();
-	return nr;
-}
-
 /**
  * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
  * minimum shallow depth that will be used.
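
The kernel-doc above keeps the existing contract: a user that intends to pass
shallow depths must announce the smallest depth it will ever use after
initialising the queue (sbitmap_queue_min_shallow_depth()). A rough sketch of
that setup, with made-up example_* names and parameters; only the
sbitmap_queue_* functions are the real API:

#include <linux/sbitmap.h>

static int example_setup_queue(struct sbitmap_queue *sbq,
			       unsigned int depth,
			       unsigned int min_shallow,
			       int numa_node)
{
	int ret;

	/* shift = -1 picks the default per-word shift; no round-robin. */
	ret = sbitmap_queue_init_node(sbq, depth, -1, false, GFP_KERNEL,
				      numa_node);
	if (ret)
		return ret;

	/*
	 * Per the sbitmap_queue_get_shallow() kernel-doc: tell the queue the
	 * minimum shallow depth that will ever be passed, after init.
	 */
	sbitmap_queue_min_shallow_depth(sbq, min_shallow);
	return 0;
}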
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -557,14 +557,14 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 	return 0;
 }
 
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
 			      unsigned int shallow_depth)
 {
 	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
 
 	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
 }
-EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
 
 void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
 				     unsigned int min_shallow_depth)
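
The deleted inline wrapper used to take care of get_cpu()/put_cpu() and hand
the CPU number back to its caller. With only the renamed function left, which
per its kernel-doc expects preemption already disabled, a caller that needs
the CPU for sbitmap_queue_clear() open-codes that, roughly as below. The
example_* name is hypothetical; sbitmap_queue_get_shallow(),
sbitmap_queue_clear() and get_cpu()/put_cpu() are the real interfaces:

#include <linux/sbitmap.h>
#include <linux/smp.h>

/*
 * Rough equivalent of what the deleted inline wrapper did for its callers:
 * disable preemption around the allocation and remember the CPU so the bit
 * can later be handed to sbitmap_queue_clear().
 */
static int example_get_and_put_bit(struct sbitmap_queue *sbq,
				   unsigned int shallow_depth)
{
	unsigned int cpu;
	int nr;

	cpu = get_cpu();	/* sbitmap_queue_get_shallow() wants preemption off */
	nr = sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();

	if (nr < 0)
		return nr;	/* no free bit available */

	/* ... use bit 'nr' ..., then release it and wake any waiters: */
	sbitmap_queue_clear(sbq, nr, cpu);
	return 0;
}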